prompt (string, 162-4.26M chars) | response (string, 109-5.16M chars) |
---|---|
Generate the Verilog code corresponding to the following Chisel files.
File IngressUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
class IngressUnit(
ingressNodeId: Int,
cParam: IngressChannelParams,
outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean,
combineSAST: Boolean,
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
class IngressUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(Decoupled(new IngressFlit(cParam.payloadBits)))
}
val io = IO(new IngressUnitIO)
val route_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2))
val route_q = Module(new Queue(new RouteComputerResp(outParams, egressParams), 2,
flow=combineRCVA))
assert(!(io.in.valid && !cParam.possibleFlows.toSeq.map(_.egressId.U === io.in.bits.egress_id).orR))
route_buffer.io.enq.bits.head := io.in.bits.head
route_buffer.io.enq.bits.tail := io.in.bits.tail
val flows = cParam.possibleFlows.toSeq
if (flows.size == 0) {
route_buffer.io.enq.bits.flow := DontCare
} else {
route_buffer.io.enq.bits.flow.ingress_node := cParam.destId.U
route_buffer.io.enq.bits.flow.ingress_node_id := ingressNodeId.U
route_buffer.io.enq.bits.flow.vnet_id := cParam.vNetId.U
route_buffer.io.enq.bits.flow.egress_node := Mux1H(
flows.map(_.egressId.U === io.in.bits.egress_id),
flows.map(_.egressNode.U)
)
route_buffer.io.enq.bits.flow.egress_node_id := Mux1H(
flows.map(_.egressId.U === io.in.bits.egress_id),
flows.map(_.egressNodeId.U)
)
}
route_buffer.io.enq.bits.payload := io.in.bits.payload
route_buffer.io.enq.bits.virt_channel_id := DontCare
io.router_req.bits.src_virt_id := 0.U
io.router_req.bits.flow := route_buffer.io.enq.bits.flow
val at_dest = route_buffer.io.enq.bits.flow.egress_node === nodeId.U
route_buffer.io.enq.valid := io.in.valid && (
io.router_req.ready || !io.in.bits.head || at_dest)
io.router_req.valid := io.in.valid && route_buffer.io.enq.ready && io.in.bits.head && !at_dest
io.in.ready := route_buffer.io.enq.ready && (
io.router_req.ready || !io.in.bits.head || at_dest)
route_q.io.enq.valid := io.router_req.fire
route_q.io.enq.bits := io.router_resp
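// Fast path: when the head flit's egress is on this router, there is nothing to
// route. Instead of waiting for a route-computer response, the block below
// enqueues a response into route_q directly, selecting only VC 0 of the
// matching egress output.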
when (io.in.fire && io.in.bits.head && at_dest) {
route_q.io.enq.valid := true.B
route_q.io.enq.bits.vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (egressParams(o).egressId.U === io.in.bits.egress_id) {
route_q.io.enq.bits.vc_sel(o+nOutputs)(0) := true.B
}
}
}
assert(!(route_q.io.enq.valid && !route_q.io.enq.ready))
val vcalloc_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2))
val vcalloc_q = Module(new Queue(new VCAllocResp(outParams, egressParams),
1, pipe=true))
vcalloc_buffer.io.enq.bits := route_buffer.io.deq.bits
io.vcalloc_req.bits.vc_sel := route_q.io.deq.bits.vc_sel
io.vcalloc_req.bits.flow := route_buffer.io.deq.bits.flow
io.vcalloc_req.bits.in_vc := 0.U
val head = route_buffer.io.deq.bits.head
val tail = route_buffer.io.deq.bits.tail
vcalloc_buffer.io.enq.valid := (route_buffer.io.deq.valid &&
(route_q.io.deq.valid || !head) &&
(io.vcalloc_req.ready || !head)
)
io.vcalloc_req.valid := (route_buffer.io.deq.valid && route_q.io.deq.valid &&
head && vcalloc_buffer.io.enq.ready && vcalloc_q.io.enq.ready)
route_buffer.io.deq.ready := (vcalloc_buffer.io.enq.ready &&
(route_q.io.deq.valid || !head) &&
(io.vcalloc_req.ready || !head) &&
(vcalloc_q.io.enq.ready || !head))
route_q.io.deq.ready := (route_buffer.io.deq.fire && tail)
vcalloc_q.io.enq.valid := io.vcalloc_req.fire
vcalloc_q.io.enq.bits := io.vcalloc_resp
assert(!(vcalloc_q.io.enq.valid && !vcalloc_q.io.enq.ready))
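// Switch allocation: a crossbar request is raised only while the allocated
// output VC still has a credit available (c below) and this ingress is not
// blocked by the router (io.block).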
io.salloc_req(0).bits.vc_sel := vcalloc_q.io.deq.bits.vc_sel
io.salloc_req(0).bits.tail := vcalloc_buffer.io.deq.bits.tail
val c = (vcalloc_q.io.deq.bits.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
val vcalloc_tail = vcalloc_buffer.io.deq.bits.tail
io.salloc_req(0).valid := vcalloc_buffer.io.deq.valid && vcalloc_q.io.deq.valid && c && !io.block
vcalloc_buffer.io.deq.ready := io.salloc_req(0).ready && vcalloc_q.io.deq.valid && c && !io.block
vcalloc_q.io.deq.ready := vcalloc_tail && vcalloc_buffer.io.deq.fire
val out_bundle = if (combineSAST) {
Wire(Valid(new SwitchBundle(outParams, egressParams)))
} else {
Reg(Valid(new SwitchBundle(outParams, egressParams)))
}
io.out(0) := out_bundle
out_bundle.valid := vcalloc_buffer.io.deq.fire
out_bundle.bits.flit := vcalloc_buffer.io.deq.bits
out_bundle.bits.flit.virt_channel_id := 0.U
val out_channel_oh = vcalloc_q.io.deq.bits.vc_sel.map(_.reduce(_||_)).toSeq
out_bundle.bits.out_virt_channel := Mux1H(out_channel_oh,
vcalloc_q.io.deq.bits.vc_sel.map(v => OHToUInt(v)).toSeq)
io.debug.va_stall := io.vcalloc_req.valid && !io.vcalloc_req.ready
io.debug.sa_stall := io.salloc_req(0).valid && !io.salloc_req(0).ready
// TODO: We should not generate input/ingress/output/egress units for untraversable channels
if (!cParam.traversable) {
io.in.ready := false.B
io.router_req.valid := false.B
io.router_req.bits := DontCare
io.vcalloc_req.valid := false.B
io.vcalloc_req.bits := DontCare
io.salloc_req.foreach(_.valid := false.B)
io.salloc_req.foreach(_.bits := DontCare)
io.out.foreach(_.valid := false.B)
io.out.foreach(_.bits := DontCare)
}
}
| module IngressUnit_36( // @[IngressUnit.scala:11:7]
input clock, // @[IngressUnit.scala:11:7]
input reset, // @[IngressUnit.scala:11:7]
input io_vcalloc_req_ready, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_valid, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_2_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_1_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_1, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_2, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_3, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_4, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_5, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_6, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_7, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_8, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_9, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_10, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_11, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_12, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_13, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_14, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_15, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_16, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_17, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_18, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_19, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_20, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_21, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_2_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_1_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_1, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_2, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_3, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_4, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_5, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_6, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_7, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_8, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_9, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_10, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_11, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_12, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_13, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_14, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_15, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_16, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_17, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_18, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_19, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_20, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_21, // @[IngressUnit.scala:24:14]
input io_out_credit_available_2_0, // @[IngressUnit.scala:24:14]
input io_out_credit_available_1_0, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_10, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_11, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_14, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_15, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_18, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_19, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_20, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_21, // @[IngressUnit.scala:24:14]
input io_salloc_req_0_ready, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_valid, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_2_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_1_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_1, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_2, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_3, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_4, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_5, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_6, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_7, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_8, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_9, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_10, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_11, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_12, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_13, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_14, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_15, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_16, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_17, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_18, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_19, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_20, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_21, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_tail, // @[IngressUnit.scala:24:14]
output io_out_0_valid, // @[IngressUnit.scala:24:14]
output io_out_0_bits_flit_head, // @[IngressUnit.scala:24:14]
output io_out_0_bits_flit_tail, // @[IngressUnit.scala:24:14]
output [72:0] io_out_0_bits_flit_payload, // @[IngressUnit.scala:24:14]
output [3:0] io_out_0_bits_flit_flow_vnet_id, // @[IngressUnit.scala:24:14]
output [5:0] io_out_0_bits_flit_flow_ingress_node, // @[IngressUnit.scala:24:14]
output [2:0] io_out_0_bits_flit_flow_ingress_node_id, // @[IngressUnit.scala:24:14]
output [5:0] io_out_0_bits_flit_flow_egress_node, // @[IngressUnit.scala:24:14]
output [2:0] io_out_0_bits_flit_flow_egress_node_id, // @[IngressUnit.scala:24:14]
output [4:0] io_out_0_bits_out_virt_channel, // @[IngressUnit.scala:24:14]
output io_in_ready, // @[IngressUnit.scala:24:14]
input io_in_valid, // @[IngressUnit.scala:24:14]
input io_in_bits_head, // @[IngressUnit.scala:24:14]
input [72:0] io_in_bits_payload, // @[IngressUnit.scala:24:14]
input [5:0] io_in_bits_egress_id // @[IngressUnit.scala:24:14]
);
wire _GEN; // @[Decoupled.scala:51:35]
wire _vcalloc_q_io_enq_ready; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_valid; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_2_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_1_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_1; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_2; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_3; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_4; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_5; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_6; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_7; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_8; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_9; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_10; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_11; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_12; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_13; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_14; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_15; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_16; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_17; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_18; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_19; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_20; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_21; // @[IngressUnit.scala:76:25]
wire _vcalloc_buffer_io_enq_ready; // @[IngressUnit.scala:75:30]
wire _vcalloc_buffer_io_deq_valid; // @[IngressUnit.scala:75:30]
wire _vcalloc_buffer_io_deq_bits_head; // @[IngressUnit.scala:75:30]
wire _vcalloc_buffer_io_deq_bits_tail; // @[IngressUnit.scala:75:30]
wire [72:0] _vcalloc_buffer_io_deq_bits_payload; // @[IngressUnit.scala:75:30]
wire [3:0] _vcalloc_buffer_io_deq_bits_flow_vnet_id; // @[IngressUnit.scala:75:30]
wire [5:0] _vcalloc_buffer_io_deq_bits_flow_ingress_node; // @[IngressUnit.scala:75:30]
wire [2:0] _vcalloc_buffer_io_deq_bits_flow_ingress_node_id; // @[IngressUnit.scala:75:30]
wire [5:0] _vcalloc_buffer_io_deq_bits_flow_egress_node; // @[IngressUnit.scala:75:30]
wire [2:0] _vcalloc_buffer_io_deq_bits_flow_egress_node_id; // @[IngressUnit.scala:75:30]
wire _route_q_io_enq_ready; // @[IngressUnit.scala:27:23]
wire _route_q_io_deq_valid; // @[IngressUnit.scala:27:23]
wire _route_buffer_io_enq_ready; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_valid; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_bits_head; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_bits_tail; // @[IngressUnit.scala:26:28]
wire [72:0] _route_buffer_io_deq_bits_payload; // @[IngressUnit.scala:26:28]
wire [3:0] _route_buffer_io_deq_bits_flow_vnet_id; // @[IngressUnit.scala:26:28]
wire [5:0] _route_buffer_io_deq_bits_flow_ingress_node; // @[IngressUnit.scala:26:28]
wire [2:0] _route_buffer_io_deq_bits_flow_ingress_node_id; // @[IngressUnit.scala:26:28]
wire [5:0] _route_buffer_io_deq_bits_flow_egress_node; // @[IngressUnit.scala:26:28]
wire [2:0] _route_buffer_io_deq_bits_flow_egress_node_id; // @[IngressUnit.scala:26:28]
wire [4:0] _route_buffer_io_deq_bits_virt_channel_id; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T = io_in_bits_egress_id == 6'h28; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_1 = io_in_bits_egress_id == 6'h2B; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_2 = io_in_bits_egress_id == 6'h2E; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_3 = io_in_bits_egress_id == 6'h31; // @[IngressUnit.scala:30:72]
wire [2:0] _GEN_0 = {1'h0, _route_buffer_io_enq_bits_flow_egress_node_id_T_1, 1'h0} | (_route_buffer_io_enq_bits_flow_egress_node_id_T_3 ? 3'h6 : 3'h0); // @[Mux.scala:30:73]
wire [2:0] _GEN_1 = {_route_buffer_io_enq_bits_flow_egress_node_id_T_2, _GEN_0[2:1]}; // @[Mux.scala:30:73]
assign _GEN = _route_buffer_io_enq_ready & io_in_valid & io_in_bits_head & _GEN_1 == 3'h5; // @[Decoupled.scala:51:35]
wire route_q_io_enq_valid = _GEN | io_in_valid & _route_buffer_io_enq_ready & io_in_bits_head & _GEN_1 != 3'h5; // @[Decoupled.scala:51:35]
wire io_vcalloc_req_valid_0 = _route_buffer_io_deq_valid & _route_q_io_deq_valid & _route_buffer_io_deq_bits_head & _vcalloc_buffer_io_enq_ready & _vcalloc_q_io_enq_ready; // @[IngressUnit.scala:26:28, :27:23, :75:30, :76:25, :91:{54,78}, :92:{10,41}]
wire route_buffer_io_deq_ready = _vcalloc_buffer_io_enq_ready & (_route_q_io_deq_valid | ~_route_buffer_io_deq_bits_head) & (io_vcalloc_req_ready | ~_route_buffer_io_deq_bits_head) & (_vcalloc_q_io_enq_ready | ~_route_buffer_io_deq_bits_head); // @[IngressUnit.scala:26:28, :27:23, :75:30, :76:25, :88:30, :93:61, :94:{27,37}, :95:{27,37}, :96:29]
wire vcalloc_q_io_enq_valid = io_vcalloc_req_ready & io_vcalloc_req_valid_0; // @[Decoupled.scala:51:35] |
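The row above pairs a Chisel source file with (a truncated excerpt of) the SystemVerilog emitted for it. Below is a minimal sketch of how such output is typically produced, assuming a recent Chisel/CIRCT toolchain. The Passthrough module and EmitVerilog object are illustrative placeholders, not part of the dataset; generating IngressUnit itself would additionally require its channel-parameter case classes and an implicit Parameters instance.
import chisel3._
import circt.stage.ChiselStage

// Illustrative stand-in design (not from the dataset).
class Passthrough extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(8.W))
    val out = Output(UInt(8.W))
  })
  io.out := io.in
}

object EmitVerilog extends App {
  // Elaborates the Chisel design and prints the SystemVerilog produced by firtool.
  println(ChiselStage.emitSystemVerilog(new Passthrough))
}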
Generate the Verilog code corresponding to the following Chisel files.
File MSHR.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import freechips.rocketchip.tilelink._
import TLPermissions._
import TLMessages._
import MetaData._
import chisel3.PrintableHelper
import chisel3.experimental.dataview._
class ScheduleRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val a = Valid(new SourceARequest(params))
val b = Valid(new SourceBRequest(params))
val c = Valid(new SourceCRequest(params))
val d = Valid(new SourceDRequest(params))
val e = Valid(new SourceERequest(params))
val x = Valid(new SourceXRequest(params))
val dir = Valid(new DirectoryWrite(params))
val reload = Bool() // get next request via allocate (if any)
}
class MSHRStatus(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val way = UInt(params.wayBits.W)
val blockB = Bool()
val nestB = Bool()
val blockC = Bool()
val nestC = Bool()
}
class NestedWriteback(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val b_toN = Bool() // nested Probes may unhit us
val b_toB = Bool() // nested Probes may demote us
val b_clr_dirty = Bool() // nested Probes clear dirty
val c_set_dirty = Bool() // nested Releases MAY set dirty
}
sealed trait CacheState
{
val code = CacheState.index.U
CacheState.index = CacheState.index + 1
}
object CacheState
{
var index = 0
}
case object S_INVALID extends CacheState
case object S_BRANCH extends CacheState
case object S_BRANCH_C extends CacheState
case object S_TIP extends CacheState
case object S_TIP_C extends CacheState
case object S_TIP_CD extends CacheState
case object S_TIP_D extends CacheState
case object S_TRUNK_C extends CacheState
case object S_TRUNK_CD extends CacheState
class MSHR(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val allocate = Flipped(Valid(new AllocateRequest(params))) // refills MSHR for next cycle
val directory = Flipped(Valid(new DirectoryResult(params))) // triggers schedule setup
val status = Valid(new MSHRStatus(params))
val schedule = Decoupled(new ScheduleRequest(params))
val sinkc = Flipped(Valid(new SinkCResponse(params)))
val sinkd = Flipped(Valid(new SinkDResponse(params)))
val sinke = Flipped(Valid(new SinkEResponse(params)))
val nestedwb = Flipped(new NestedWriteback(params))
})
val request_valid = RegInit(false.B)
val request = Reg(new FullRequest(params))
val meta_valid = RegInit(false.B)
val meta = Reg(new DirectoryResult(params))
// Define which states are valid
when (meta_valid) {
when (meta.state === INVALID) {
assert (!meta.clients.orR)
assert (!meta.dirty)
}
when (meta.state === BRANCH) {
assert (!meta.dirty)
}
when (meta.state === TRUNK) {
assert (meta.clients.orR)
assert ((meta.clients & (meta.clients - 1.U)) === 0.U) // at most one
}
when (meta.state === TIP) {
// noop
}
}
// Completed transitions (s_ = scheduled), (w_ = waiting)
val s_rprobe = RegInit(true.B) // B
val w_rprobeackfirst = RegInit(true.B)
val w_rprobeacklast = RegInit(true.B)
val s_release = RegInit(true.B) // CW w_rprobeackfirst
val w_releaseack = RegInit(true.B)
val s_pprobe = RegInit(true.B) // B
val s_acquire = RegInit(true.B) // A s_release, s_pprobe [1]
val s_flush = RegInit(true.B) // X w_releaseack
val w_grantfirst = RegInit(true.B)
val w_grantlast = RegInit(true.B)
val w_grant = RegInit(true.B) // first | last depending on wormhole
val w_pprobeackfirst = RegInit(true.B)
val w_pprobeacklast = RegInit(true.B)
val w_pprobeack = RegInit(true.B) // first | last depending on wormhole
val s_probeack = RegInit(true.B) // C w_pprobeackfirst (mutually exclusive with next two s_*)
val s_grantack = RegInit(true.B) // E w_grantfirst ... CAN require both outE&inD to service outD
val s_execute = RegInit(true.B) // D w_pprobeack, w_grant
val w_grantack = RegInit(true.B)
val s_writeback = RegInit(true.B) // W w_*
// [1]: We cannot issue outer Acquire while holding blockB (=> outA can stall)
// However, inB and outC are higher priority than outB, so s_release and s_pprobe
// may be safely issued while blockB. Thus we must NOT try to schedule the
// potentially stuck s_acquire with either of them (scheduler is all or none).
// Meta-data that we discover underway
val sink = Reg(UInt(params.outer.bundle.sinkBits.W))
val gotT = Reg(Bool())
val bad_grant = Reg(Bool())
val probes_done = Reg(UInt(params.clientBits.W))
val probes_toN = Reg(UInt(params.clientBits.W))
val probes_noT = Reg(Bool())
// When a nested transaction completes, update our meta data
when (meta_valid && meta.state =/= INVALID &&
io.nestedwb.set === request.set && io.nestedwb.tag === meta.tag) {
when (io.nestedwb.b_clr_dirty) { meta.dirty := false.B }
when (io.nestedwb.c_set_dirty) { meta.dirty := true.B }
when (io.nestedwb.b_toB) { meta.state := BRANCH }
when (io.nestedwb.b_toN) { meta.hit := false.B }
}
// Scheduler status
io.status.valid := request_valid
io.status.bits.set := request.set
io.status.bits.tag := request.tag
io.status.bits.way := meta.way
io.status.bits.blockB := !meta_valid || ((!w_releaseack || !w_rprobeacklast || !w_pprobeacklast) && !w_grantfirst)
io.status.bits.nestB := meta_valid && w_releaseack && w_rprobeacklast && w_pprobeacklast && !w_grantfirst
// The above rules ensure we will block and not nest an outer probe while still doing our
// own inner probes. Thus every probe wakes exactly one MSHR.
io.status.bits.blockC := !meta_valid
io.status.bits.nestC := meta_valid && (!w_rprobeackfirst || !w_pprobeackfirst || !w_grantfirst)
// The w_grantfirst in nestC is necessary to deal with:
// acquire waiting for grant, inner release gets queued, outer probe -> inner probe -> deadlock
// ... this is possible because the release+probe can be for same set, but different tag
// We can only demand: block, nest, or queue
assert (!io.status.bits.nestB || !io.status.bits.blockB)
assert (!io.status.bits.nestC || !io.status.bits.blockC)
// Scheduler requests
val no_wait = w_rprobeacklast && w_releaseack && w_grantlast && w_pprobeacklast && w_grantack
io.schedule.bits.a.valid := !s_acquire && s_release && s_pprobe
io.schedule.bits.b.valid := !s_rprobe || !s_pprobe
io.schedule.bits.c.valid := (!s_release && w_rprobeackfirst) || (!s_probeack && w_pprobeackfirst)
io.schedule.bits.d.valid := !s_execute && w_pprobeack && w_grant
io.schedule.bits.e.valid := !s_grantack && w_grantfirst
io.schedule.bits.x.valid := !s_flush && w_releaseack
io.schedule.bits.dir.valid := (!s_release && w_rprobeackfirst) || (!s_writeback && no_wait)
io.schedule.bits.reload := no_wait
io.schedule.valid := io.schedule.bits.a.valid || io.schedule.bits.b.valid || io.schedule.bits.c.valid ||
io.schedule.bits.d.valid || io.schedule.bits.e.valid || io.schedule.bits.x.valid ||
io.schedule.bits.dir.valid
// Schedule completions
when (io.schedule.ready) {
s_rprobe := true.B
when (w_rprobeackfirst) { s_release := true.B }
s_pprobe := true.B
when (s_release && s_pprobe) { s_acquire := true.B }
when (w_releaseack) { s_flush := true.B }
when (w_pprobeackfirst) { s_probeack := true.B }
when (w_grantfirst) { s_grantack := true.B }
when (w_pprobeack && w_grant) { s_execute := true.B }
when (no_wait) { s_writeback := true.B }
// Await the next operation
when (no_wait) {
request_valid := false.B
meta_valid := false.B
}
}
// Resulting meta-data
val final_meta_writeback = WireInit(meta)
val req_clientBit = params.clientBit(request.source)
val req_needT = needT(request.opcode, request.param)
val req_acquire = request.opcode === AcquireBlock || request.opcode === AcquirePerm
val meta_no_clients = !meta.clients.orR
val req_promoteT = req_acquire && Mux(meta.hit, meta_no_clients && meta.state === TIP, gotT)
when (request.prio(2) && (!params.firstLevel).B) { // always a hit
final_meta_writeback.dirty := meta.dirty || request.opcode(0)
final_meta_writeback.state := Mux(request.param =/= TtoT && meta.state === TRUNK, TIP, meta.state)
final_meta_writeback.clients := meta.clients & ~Mux(isToN(request.param), req_clientBit, 0.U)
final_meta_writeback.hit := true.B // chained requests are hits
} .elsewhen (request.control && params.control.B) { // request.prio(0)
when (meta.hit) {
final_meta_writeback.dirty := false.B
final_meta_writeback.state := INVALID
final_meta_writeback.clients := meta.clients & ~probes_toN
}
final_meta_writeback.hit := false.B
} .otherwise {
final_meta_writeback.dirty := (meta.hit && meta.dirty) || !request.opcode(2)
final_meta_writeback.state := Mux(req_needT,
Mux(req_acquire, TRUNK, TIP),
Mux(!meta.hit, Mux(gotT, Mux(req_acquire, TRUNK, TIP), BRANCH),
MuxLookup(meta.state, 0.U(2.W))(Seq(
INVALID -> BRANCH,
BRANCH -> BRANCH,
TRUNK -> TIP,
TIP -> Mux(meta_no_clients && req_acquire, TRUNK, TIP)))))
final_meta_writeback.clients := Mux(meta.hit, meta.clients & ~probes_toN, 0.U) |
Mux(req_acquire, req_clientBit, 0.U)
final_meta_writeback.tag := request.tag
final_meta_writeback.hit := true.B
}
when (bad_grant) {
when (meta.hit) {
// upgrade failed (B -> T)
assert (!meta_valid || meta.state === BRANCH)
final_meta_writeback.hit := true.B
final_meta_writeback.dirty := false.B
final_meta_writeback.state := BRANCH
final_meta_writeback.clients := meta.clients & ~probes_toN
} .otherwise {
// failed N -> (T or B)
final_meta_writeback.hit := false.B
final_meta_writeback.dirty := false.B
final_meta_writeback.state := INVALID
final_meta_writeback.clients := 0.U
}
}
val invalid = Wire(new DirectoryEntry(params))
invalid.dirty := false.B
invalid.state := INVALID
invalid.clients := 0.U
invalid.tag := 0.U
// Just because a client says BtoT, by the time we process the request he may be N.
// Therefore, we must consult our own meta-data state to confirm he owns the line still.
val honour_BtoT = meta.hit && (meta.clients & req_clientBit).orR
// The client asking us to act is proof they don't have permissions.
val excluded_client = Mux(meta.hit && request.prio(0) && skipProbeN(request.opcode, params.cache.hintsSkipProbe), req_clientBit, 0.U)
io.schedule.bits.a.bits.tag := request.tag
io.schedule.bits.a.bits.set := request.set
io.schedule.bits.a.bits.param := Mux(req_needT, Mux(meta.hit, BtoT, NtoT), NtoB)
io.schedule.bits.a.bits.block := request.size =/= log2Ceil(params.cache.blockBytes).U ||
!(request.opcode === PutFullData || request.opcode === AcquirePerm)
io.schedule.bits.a.bits.source := 0.U
io.schedule.bits.b.bits.param := Mux(!s_rprobe, toN, Mux(request.prio(1), request.param, Mux(req_needT, toN, toB)))
io.schedule.bits.b.bits.tag := Mux(!s_rprobe, meta.tag, request.tag)
io.schedule.bits.b.bits.set := request.set
io.schedule.bits.b.bits.clients := meta.clients & ~excluded_client
io.schedule.bits.c.bits.opcode := Mux(meta.dirty, ReleaseData, Release)
io.schedule.bits.c.bits.param := Mux(meta.state === BRANCH, BtoN, TtoN)
io.schedule.bits.c.bits.source := 0.U
io.schedule.bits.c.bits.tag := meta.tag
io.schedule.bits.c.bits.set := request.set
io.schedule.bits.c.bits.way := meta.way
io.schedule.bits.c.bits.dirty := meta.dirty
io.schedule.bits.d.bits.viewAsSupertype(chiselTypeOf(request)) := request
io.schedule.bits.d.bits.param := Mux(!req_acquire, request.param,
MuxLookup(request.param, request.param)(Seq(
NtoB -> Mux(req_promoteT, NtoT, NtoB),
BtoT -> Mux(honour_BtoT, BtoT, NtoT),
NtoT -> NtoT)))
io.schedule.bits.d.bits.sink := 0.U
io.schedule.bits.d.bits.way := meta.way
io.schedule.bits.d.bits.bad := bad_grant
io.schedule.bits.e.bits.sink := sink
io.schedule.bits.x.bits.fail := false.B
io.schedule.bits.dir.bits.set := request.set
io.schedule.bits.dir.bits.way := meta.way
io.schedule.bits.dir.bits.data := Mux(!s_release, invalid, WireInit(new DirectoryEntry(params), init = final_meta_writeback))
// Coverage of state transitions
def cacheState(entry: DirectoryEntry, hit: Bool) = {
val out = WireDefault(0.U)
val c = entry.clients.orR
val d = entry.dirty
switch (entry.state) {
is (BRANCH) { out := Mux(c, S_BRANCH_C.code, S_BRANCH.code) }
is (TRUNK) { out := Mux(d, S_TRUNK_CD.code, S_TRUNK_C.code) }
is (TIP) { out := Mux(c, Mux(d, S_TIP_CD.code, S_TIP_C.code), Mux(d, S_TIP_D.code, S_TIP.code)) }
is (INVALID) { out := S_INVALID.code }
}
when (!hit) { out := S_INVALID.code }
out
}
val p = !params.lastLevel // can be probed
val c = !params.firstLevel // can be acquired
val m = params.inner.client.clients.exists(!_.supports.probe) // can be written (or read)
val r = params.outer.manager.managers.exists(!_.alwaysGrantsT) // read-only devices exist
val f = params.control // flush control register exists
val cfg = (p, c, m, r, f)
val b = r || p // can reach branch state (via probe downgrade or read-only device)
// The cache must be used for something or we would not be here
require(c || m)
val evict = cacheState(meta, !meta.hit)
val before = cacheState(meta, meta.hit)
val after = cacheState(final_meta_writeback, true.B)
def eviction(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) {
if (cover) {
params.ccover(evict === from.code, s"MSHR_${from}_EVICT", s"State transition from ${from} to evicted ${cfg}")
} else {
assert(!(evict === from.code), cf"State transition from ${from} to evicted should be impossible ${cfg}")
}
if (cover && f) {
params.ccover(before === from.code, s"MSHR_${from}_FLUSH", s"State transition from ${from} to flushed ${cfg}")
} else {
assert(!(before === from.code), cf"State transition from ${from} to flushed should be impossible ${cfg}")
}
}
def transition(from: CacheState, to: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) {
if (cover) {
params.ccover(before === from.code && after === to.code, s"MSHR_${from}_${to}", s"State transition from ${from} to ${to} ${cfg}")
} else {
assert(!(before === from.code && after === to.code), cf"State transition from ${from} to ${to} should be impossible ${cfg}")
}
}
when ((!s_release && w_rprobeackfirst) && io.schedule.ready) {
eviction(S_BRANCH, b) // MMIO read to read-only device
eviction(S_BRANCH_C, b && c) // you need children to become C
eviction(S_TIP, true) // MMIO read || clean release can lead to this state
eviction(S_TIP_C, c) // needs two clients || client + mmio || downgrading client
eviction(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client
eviction(S_TIP_D, true) // MMIO write || dirty release lead here
eviction(S_TRUNK_C, c) // acquire for write
eviction(S_TRUNK_CD, c) // dirty release then reacquire
}
when ((!s_writeback && no_wait) && io.schedule.ready) {
transition(S_INVALID, S_BRANCH, b && m) // only MMIO can bring us to BRANCH state
transition(S_INVALID, S_BRANCH_C, b && c) // C state is only possible if there are inner caches
transition(S_INVALID, S_TIP, m) // MMIO read
transition(S_INVALID, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_INVALID, S_TIP_CD, false) // acquire does not cause dirty immediately
transition(S_INVALID, S_TIP_D, m) // MMIO write
transition(S_INVALID, S_TRUNK_C, c) // acquire
transition(S_INVALID, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH, S_INVALID, b && p) // probe can do this (flushes run as evictions)
transition(S_BRANCH, S_BRANCH_C, b && c) // acquire
transition(S_BRANCH, S_TIP, b && m) // prefetch write
transition(S_BRANCH, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_BRANCH, S_TIP_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH, S_TIP_D, b && m) // MMIO write
transition(S_BRANCH, S_TRUNK_C, b && c) // acquire
transition(S_BRANCH, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH_C, S_INVALID, b && c && p)
transition(S_BRANCH_C, S_BRANCH, b && c) // clean release (optional)
transition(S_BRANCH_C, S_TIP, b && c && m) // prefetch write
transition(S_BRANCH_C, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_BRANCH_C, S_TIP_D, b && c && m) // MMIO write
transition(S_BRANCH_C, S_TIP_CD, false) // going dirty means we must shoot down clients
transition(S_BRANCH_C, S_TRUNK_C, b && c) // acquire
transition(S_BRANCH_C, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_TIP, S_INVALID, p)
transition(S_TIP, S_BRANCH, p) // losing TIP only possible via probe
transition(S_TIP, S_BRANCH_C, false) // we would go S_TRUNK_C instead
transition(S_TIP, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP, S_TIP_D, m) // direct dirty only via MMIO write
transition(S_TIP, S_TIP_CD, false) // acquire does not make us dirty immediately
transition(S_TIP, S_TRUNK_C, c) // acquire
transition(S_TIP, S_TRUNK_CD, false) // acquire does not make us dirty immediately
transition(S_TIP_C, S_INVALID, c && p)
transition(S_TIP_C, S_BRANCH, c && p) // losing TIP only possible via probe
transition(S_TIP_C, S_BRANCH_C, c && p) // losing TIP only possible via probe
transition(S_TIP_C, S_TIP, c) // probed while MMIO read || clean release (optional)
transition(S_TIP_C, S_TIP_D, c && m) // direct dirty only via MMIO write
transition(S_TIP_C, S_TIP_CD, false) // going dirty means we must shoot down clients
transition(S_TIP_C, S_TRUNK_C, c) // acquire
transition(S_TIP_C, S_TRUNK_CD, false) // acquire does not make us immediately dirty
transition(S_TIP_D, S_INVALID, p)
transition(S_TIP_D, S_BRANCH, p) // losing D is only possible via probe
transition(S_TIP_D, S_BRANCH_C, p && c) // probed while acquire shared
transition(S_TIP_D, S_TIP, p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TIP_D, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP_D, S_TIP_CD, false) // we would go S_TRUNK_CD instead
transition(S_TIP_D, S_TRUNK_C, p && c) // probed while acquired
transition(S_TIP_D, S_TRUNK_CD, c) // acquire
transition(S_TIP_CD, S_INVALID, c && p)
transition(S_TIP_CD, S_BRANCH, c && p) // losing D is only possible via probe
transition(S_TIP_CD, S_BRANCH_C, c && p) // losing D is only possible via probe
transition(S_TIP_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TIP_CD, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP_CD, S_TIP_D, c) // MMIO write || clean release (optional)
transition(S_TIP_CD, S_TRUNK_C, c && p) // probed while acquire
transition(S_TIP_CD, S_TRUNK_CD, c) // acquire
transition(S_TRUNK_C, S_INVALID, c && p)
transition(S_TRUNK_C, S_BRANCH, c && p) // losing TIP only possible via probe
transition(S_TRUNK_C, S_BRANCH_C, c && p) // losing TIP only possible via probe
transition(S_TRUNK_C, S_TIP, c) // MMIO read || clean release (optional)
transition(S_TRUNK_C, S_TIP_C, c) // bounce shared
transition(S_TRUNK_C, S_TIP_D, c) // dirty release
transition(S_TRUNK_C, S_TIP_CD, c) // dirty bounce shared
transition(S_TRUNK_C, S_TRUNK_CD, c) // dirty bounce
transition(S_TRUNK_CD, S_INVALID, c && p)
transition(S_TRUNK_CD, S_BRANCH, c && p) // losing D only possible via probe
transition(S_TRUNK_CD, S_BRANCH_C, c && p) // losing D only possible via probe
transition(S_TRUNK_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TRUNK_CD, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TRUNK_CD, S_TIP_D, c) // dirty release
transition(S_TRUNK_CD, S_TIP_CD, c) // bounce shared
transition(S_TRUNK_CD, S_TRUNK_C, c && p) // probed while acquire
}
// Handle response messages
val probe_bit = params.clientBit(io.sinkc.bits.source)
val last_probe = (probes_done | probe_bit) === (meta.clients & ~excluded_client)
val probe_toN = isToN(io.sinkc.bits.param)
if (!params.firstLevel) when (io.sinkc.valid) {
params.ccover( probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_FULL", "Client downgraded to N when asked only to do B")
params.ccover(!probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_HALF", "Client downgraded to B when asked only to do B")
// Caution: the probe matches us only in set.
// We would never allow an outer probe to nest until both w_[rp]probeack complete, so
// it is safe to just unguardedly update the probe FSM.
probes_done := probes_done | probe_bit
probes_toN := probes_toN | Mux(probe_toN, probe_bit, 0.U)
probes_noT := probes_noT || io.sinkc.bits.param =/= TtoT
w_rprobeackfirst := w_rprobeackfirst || last_probe
w_rprobeacklast := w_rprobeacklast || (last_probe && io.sinkc.bits.last)
w_pprobeackfirst := w_pprobeackfirst || last_probe
w_pprobeacklast := w_pprobeacklast || (last_probe && io.sinkc.bits.last)
// Allow wormhole routing from sinkC if the first request beat has offset 0
val set_pprobeack = last_probe && (io.sinkc.bits.last || request.offset === 0.U)
w_pprobeack := w_pprobeack || set_pprobeack
params.ccover(!set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_SERIAL", "Sequential routing of probe response data")
params.ccover( set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_WORMHOLE", "Wormhole routing of probe response data")
// However, meta-data updates need to be done more cautiously
when (meta.state =/= INVALID && io.sinkc.bits.tag === meta.tag && io.sinkc.bits.data) { meta.dirty := true.B } // !!!
}
when (io.sinkd.valid) {
when (io.sinkd.bits.opcode === Grant || io.sinkd.bits.opcode === GrantData) {
sink := io.sinkd.bits.sink
w_grantfirst := true.B
w_grantlast := io.sinkd.bits.last
// Record if we need to prevent taking ownership
bad_grant := io.sinkd.bits.denied
// Allow wormhole routing for requests whose first beat has offset 0
w_grant := request.offset === 0.U || io.sinkd.bits.last
params.ccover(io.sinkd.bits.opcode === GrantData && request.offset === 0.U, "MSHR_GRANT_WORMHOLE", "Wormhole routing of grant response data")
params.ccover(io.sinkd.bits.opcode === GrantData && request.offset =/= 0.U, "MSHR_GRANT_SERIAL", "Sequential routing of grant response data")
gotT := io.sinkd.bits.param === toT
}
.elsewhen (io.sinkd.bits.opcode === ReleaseAck) {
w_releaseack := true.B
}
}
when (io.sinke.valid) {
w_grantack := true.B
}
// Bootstrap new requests
val allocate_as_full = WireInit(new FullRequest(params), init = io.allocate.bits)
val new_meta = Mux(io.allocate.valid && io.allocate.bits.repeat, final_meta_writeback, io.directory.bits)
val new_request = Mux(io.allocate.valid, allocate_as_full, request)
val new_needT = needT(new_request.opcode, new_request.param)
val new_clientBit = params.clientBit(new_request.source)
val new_skipProbe = Mux(skipProbeN(new_request.opcode, params.cache.hintsSkipProbe), new_clientBit, 0.U)
val prior = cacheState(final_meta_writeback, true.B)
def bypass(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) {
if (cover) {
params.ccover(prior === from.code, s"MSHR_${from}_BYPASS", s"State bypass transition from ${from} ${cfg}")
} else {
assert(!(prior === from.code), cf"State bypass from ${from} should be impossible ${cfg}")
}
}
when (io.allocate.valid && io.allocate.bits.repeat) {
bypass(S_INVALID, f || p) // Can lose permissions (probe/flush)
bypass(S_BRANCH, b) // MMIO read to read-only device
bypass(S_BRANCH_C, b && c) // you need children to become C
bypass(S_TIP, true) // MMIO read || clean release can lead to this state
bypass(S_TIP_C, c) // needs two clients || client + mmio || downgrading client
bypass(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client
bypass(S_TIP_D, true) // MMIO write || dirty release lead here
bypass(S_TRUNK_C, c) // acquire for write
bypass(S_TRUNK_CD, c) // dirty release then reacquire
}
when (io.allocate.valid) {
assert (!request_valid || (no_wait && io.schedule.fire))
request_valid := true.B
request := io.allocate.bits
}
// Create execution plan
when (io.directory.valid || (io.allocate.valid && io.allocate.bits.repeat)) {
meta_valid := true.B
meta := new_meta
probes_done := 0.U
probes_toN := 0.U
probes_noT := false.B
gotT := false.B
bad_grant := false.B
// These should already be either true or turning true
// We clear them here explicitly to simplify the mux tree
s_rprobe := true.B
w_rprobeackfirst := true.B
w_rprobeacklast := true.B
s_release := true.B
w_releaseack := true.B
s_pprobe := true.B
s_acquire := true.B
s_flush := true.B
w_grantfirst := true.B
w_grantlast := true.B
w_grant := true.B
w_pprobeackfirst := true.B
w_pprobeacklast := true.B
w_pprobeack := true.B
s_probeack := true.B
s_grantack := true.B
s_execute := true.B
w_grantack := true.B
s_writeback := true.B
// For C channel requests (ie: Release[Data])
when (new_request.prio(2) && (!params.firstLevel).B) {
s_execute := false.B
// Do we need to go dirty?
when (new_request.opcode(0) && !new_meta.dirty) {
s_writeback := false.B
}
// Does our state change?
when (isToB(new_request.param) && new_meta.state === TRUNK) {
s_writeback := false.B
}
// Do our clients change?
when (isToN(new_request.param) && (new_meta.clients & new_clientBit) =/= 0.U) {
s_writeback := false.B
}
assert (new_meta.hit)
}
// For X channel requests (ie: flush)
.elsewhen (new_request.control && params.control.B) { // new_request.prio(0)
s_flush := false.B
// Do we need to actually do something?
when (new_meta.hit) {
s_release := false.B
w_releaseack := false.B
// Do we need to shoot-down inner caches?
when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) {
s_rprobe := false.B
w_rprobeackfirst := false.B
w_rprobeacklast := false.B
}
}
}
// For A channel requests
.otherwise { // new_request.prio(0) && !new_request.control
s_execute := false.B
// Do we need an eviction?
when (!new_meta.hit && new_meta.state =/= INVALID) {
s_release := false.B
w_releaseack := false.B
// Do we need to shoot-down inner caches?
when ((!params.firstLevel).B & (new_meta.clients =/= 0.U)) {
s_rprobe := false.B
w_rprobeackfirst := false.B
w_rprobeacklast := false.B
}
}
// Do we need an acquire?
when (!new_meta.hit || (new_meta.state === BRANCH && new_needT)) {
s_acquire := false.B
w_grantfirst := false.B
w_grantlast := false.B
w_grant := false.B
s_grantack := false.B
s_writeback := false.B
}
// Do we need a probe?
when ((!params.firstLevel).B && (new_meta.hit &&
(new_needT || new_meta.state === TRUNK) &&
(new_meta.clients & ~new_skipProbe) =/= 0.U)) {
s_pprobe := false.B
w_pprobeackfirst := false.B
w_pprobeacklast := false.B
w_pprobeack := false.B
s_writeback := false.B
}
// Do we need a grantack?
when (new_request.opcode === AcquireBlock || new_request.opcode === AcquirePerm) {
w_grantack := false.B
s_writeback := false.B
}
// Becomes dirty?
when (!new_request.opcode(2) && new_meta.hit && !new_meta.dirty) {
s_writeback := false.B
}
}
}
}
File Parameters.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property.cover
import scala.math.{min,max}
case class CacheParameters(
level: Int,
ways: Int,
sets: Int,
blockBytes: Int,
beatBytes: Int, // inner
hintsSkipProbe: Boolean)
{
require (ways > 0)
require (sets > 0)
require (blockBytes > 0 && isPow2(blockBytes))
require (beatBytes > 0 && isPow2(beatBytes))
require (blockBytes >= beatBytes)
val blocks = ways * sets
val sizeBytes = blocks * blockBytes
val blockBeats = blockBytes/beatBytes
}
case class InclusiveCachePortParameters(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e))
}
object InclusiveCachePortParameters
{
val none = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.none)
val full = InclusiveCachePortParameters(
a = BufferParams.default,
b = BufferParams.default,
c = BufferParams.default,
d = BufferParams.default,
e = BufferParams.default)
// This removes feed-through paths from C=>A and A=>C
val fullC = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.default,
d = BufferParams.none,
e = BufferParams.none)
val flowAD = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.flow,
e = BufferParams.none)
val flowAE = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.flow)
// For innerBuf:
// SinkA: no restrictions, flows into scheduler+putbuffer
// SourceB: no restrictions, flows out of scheduler
// sinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore
// SourceD: no restrictions, flows out of bankedStore/regout
// SinkE: no restrictions, flows into scheduler
//
// ... so while none is possible, you probably want at least flowAC to cut ready
// from the scheduler delay and flowD to ease SourceD back-pressure
// For outerBuf:
// SourceA: must not be pipe, flows out of scheduler
// SinkB: no restrictions, flows into scheduler
// SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored
// SinkD: no restrictions, flows into scheduler & bankedStore
// SourceE: must not be pipe, flows out of scheduler
//
// ... AE take the channel ready into the scheduler, so you need at least flowAE
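// Hypothetical sketch (not in the original file): following the guidance above,
// an inner-side setting that cuts only the A/C ready paths and eases SourceD
// back-pressure could be built like this; the name flowACD is an assumption.
val flowACD = InclusiveCachePortParameters(
  a = BufferParams.flow,
  b = BufferParams.none,
  c = BufferParams.flow,
  d = BufferParams.flow,
  e = BufferParams.none)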
}
case class InclusiveCacheMicroParameters(
writeBytes: Int, // backing store update granularity
memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz)
portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes
dirReg: Boolean = false,
innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none
outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE
{
require (writeBytes > 0 && isPow2(writeBytes))
require (memCycles > 0)
require (portFactor >= 2) // for inner RMW and concurrent outer Release + Grant
}
case class InclusiveCacheControlParameters(
address: BigInt,
beatBytes: Int,
bankedControl: Boolean)
case class InclusiveCacheParameters(
cache: CacheParameters,
micro: InclusiveCacheMicroParameters,
control: Boolean,
inner: TLEdgeIn,
outer: TLEdgeOut)(implicit val p: Parameters)
{
require (cache.ways > 1)
require (cache.sets > 1 && isPow2(cache.sets))
require (micro.writeBytes <= inner.manager.beatBytes)
require (micro.writeBytes <= outer.manager.beatBytes)
require (inner.manager.beatBytes <= cache.blockBytes)
require (outer.manager.beatBytes <= cache.blockBytes)
// Require that all cached address ranges have contiguous blocks
outer.manager.managers.flatMap(_.address).foreach { a =>
require (a.alignment >= cache.blockBytes)
}
// If we are the first level cache, we do not need to support inner-BCE
val firstLevel = !inner.client.clients.exists(_.supports.probe)
// If we are the last level cache, we do not need to support outer-B
val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED)
require (lastLevel)
// Provision enough resources to achieve full throughput with missing single-beat accesses
val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro)
val secondary = max(mshrs, micro.memCycles - mshrs)
val putLists = micro.memCycles // allow every request to be single beat
val putBeats = max(2*cache.blockBeats, micro.memCycles)
val relLists = 2
val relBeats = relLists*cache.blockBeats
val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address))
val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_))
def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] =
if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail)
val addressMapping = bitOffsets(pickMask)
val addressBits = addressMapping.size
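// Worked example (illustrative): bitOffsets(0xd) scans 0b1101 from the LSB and
// returns List(0, 2, 3), the positions of the set bits. addressMapping is thus
// the list of physical address bits that survive into the compacted cache
// address, and addressBits is how many of them there are.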
// println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}")
val allClients = inner.client.clients.size
val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size
val clientBits = max(1, clientBitsRaw)
val stateBits = 2
val wayBits = log2Ceil(cache.ways)
val setBits = log2Ceil(cache.sets)
val offsetBits = log2Ceil(cache.blockBytes)
val tagBits = addressBits - setBits - offsetBits
val putBits = log2Ceil(max(putLists, relLists))
require (tagBits > 0)
require (offsetBits > 0)
val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1
val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1
val innerMaskBits = inner.manager.beatBytes / micro.writeBytes
val outerMaskBits = outer.manager.beatBytes / micro.writeBytes
def clientBit(source: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse)
}
}
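// Example under an assumed configuration: with two probe-capable clients whose
// sourceId ranges are 0-3 and 4-7, clientBit(5.U) yields "b10".U (one-hot for
// the second client); with no probe-capable clients it is simply 0.U.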
def clientSource(bit: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U))
}
}
def parseAddress(x: UInt): (UInt, UInt, UInt) = {
val offset = Cat(addressMapping.map(o => x(o,o)).reverse)
val set = offset >> offsetBits
val tag = set >> setBits
(tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0))
}
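// Example: parseAddress first re-packs the selected physical address bits
// (addressMapping) into a compact value, then slices it as {tag, set, offset}.
// With offsetBits = 6, setBits = 10 and tagBits = 13, this matches the
// 13/10/6-bit tag/set/offset fields visible in the generated MSHR ports below.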
def widen(x: UInt, width: Int): UInt = {
val y = x | 0.U(width.W)
assert (y >> width === 0.U)
y(width-1, 0)
}
def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = {
val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits))
val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) }
addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) }
Cat(bits.reverse)
}
def restoreAddress(expanded: UInt): UInt = {
val missingBits = flatAddresses
.map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match
.groupBy(_._1)
.view
.mapValues(_.map(_._2))
val muxMask = AddressDecoder(missingBits.values.toList)
val mux = missingBits.toList.map { case (bits, addrs) =>
val widen = addrs.map(_.widen(~muxMask))
val matches = AddressSet
.unify(widen.distinct)
.map(_.contains(expanded))
.reduce(_ || _)
(matches, bits.U)
}
expanded | Mux1H(mux)
}
def dirReg[T <: Data](x: T, en: Bool = true.B): T = {
if (micro.dirReg) RegEnable(x, en) else x
}
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc)
}
object MetaData
{
val stateBits = 2
def INVALID: UInt = 0.U(stateBits.W) // way is empty
def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk
def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk
def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch
// Does a request need trunk?
def needT(opcode: UInt, param: UInt): Bool = {
!opcode(2) ||
(opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) ||
((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB)
}
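// Examples: Get and AcquireBlock(NtoB) do not need TRUNK; PutFullData,
// Hint(PREFETCH_WRITE), and AcquireBlock/AcquirePerm with NtoT or BtoT do.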
// Does a request prove the client need not be probed?
def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = {
// Acquire(toB) and Get => is N, so no probe
// Acquire(*toT) => is N or B, but need T, so no probe
// Hint => could be anything, so a probe IS needed; if hintsSkipProbe is enabled, skip probing only the client that issued the Hint
// Put* => is N or B, so probe IS needed
opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm || opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B)
}
def isToN(param: UInt): Bool = {
param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN
}
def isToB(param: UInt): Bool = {
param === TLPermissions.TtoB || param === TLPermissions.BtoB
}
}
object InclusiveCacheParameters
{
val lfsrBits = 10
val L2ControlAddress = 0x2010000
val L2ControlSize = 0x1000
def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = {
// We need 2-3 normal MSHRs to cover the Directory latency
// To fully exploit the memory bandwidth-delay product, we need memCycles/blockBeats MSHRs
max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats)
}
def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int =
// We need one dedicated MSHR each for the B and C channels
2 + out_mshrs(cache, micro)
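// Worked example (assumed configuration): with the default memCycles = 40,
// dirReg = false, and an 8-beat block (e.g. 64-byte blocks, 8-byte beats),
// out_mshrs = max(2, ceil(40/8)) = 5 and all_mshrs = 2 + 5 = 7.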
}
class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
| module MSHR( // @[MSHR.scala:84:7]
input clock, // @[MSHR.scala:84:7]
input reset, // @[MSHR.scala:84:7]
input io_allocate_valid, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_0, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_1, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_2, // @[MSHR.scala:86:14]
input io_allocate_bits_control, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_opcode, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_param, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_size, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_source, // @[MSHR.scala:86:14]
input [12:0] io_allocate_bits_tag, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_offset, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_put, // @[MSHR.scala:86:14]
input [9:0] io_allocate_bits_set, // @[MSHR.scala:86:14]
input io_allocate_bits_repeat, // @[MSHR.scala:86:14]
input io_directory_valid, // @[MSHR.scala:86:14]
input io_directory_bits_dirty, // @[MSHR.scala:86:14]
input [1:0] io_directory_bits_state, // @[MSHR.scala:86:14]
input io_directory_bits_clients, // @[MSHR.scala:86:14]
input [12:0] io_directory_bits_tag, // @[MSHR.scala:86:14]
input io_directory_bits_hit, // @[MSHR.scala:86:14]
input [2:0] io_directory_bits_way, // @[MSHR.scala:86:14]
output io_status_valid, // @[MSHR.scala:86:14]
output [9:0] io_status_bits_set, // @[MSHR.scala:86:14]
output [12:0] io_status_bits_tag, // @[MSHR.scala:86:14]
output [2:0] io_status_bits_way, // @[MSHR.scala:86:14]
output io_status_bits_blockB, // @[MSHR.scala:86:14]
output io_status_bits_nestB, // @[MSHR.scala:86:14]
output io_status_bits_blockC, // @[MSHR.scala:86:14]
output io_status_bits_nestC, // @[MSHR.scala:86:14]
input io_schedule_ready, // @[MSHR.scala:86:14]
output io_schedule_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_a_valid, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_a_bits_tag, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_a_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_a_bits_param, // @[MSHR.scala:86:14]
output io_schedule_bits_a_bits_block, // @[MSHR.scala:86:14]
output io_schedule_bits_b_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_b_bits_param, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_b_bits_tag, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_b_bits_set, // @[MSHR.scala:86:14]
output io_schedule_bits_b_bits_clients, // @[MSHR.scala:86:14]
output io_schedule_bits_c_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_opcode, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_param, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_c_bits_tag, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_c_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_c_bits_dirty, // @[MSHR.scala:86:14]
output io_schedule_bits_d_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_0, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_1, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_2, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_control, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_opcode, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_param, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_size, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_source, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_d_bits_tag, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_offset, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_put, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_d_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_bad, // @[MSHR.scala:86:14]
output io_schedule_bits_e_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_e_bits_sink, // @[MSHR.scala:86:14]
output io_schedule_bits_x_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_valid, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_dir_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_dir_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_bits_data_dirty, // @[MSHR.scala:86:14]
output [1:0] io_schedule_bits_dir_bits_data_state, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_bits_data_clients, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_dir_bits_data_tag, // @[MSHR.scala:86:14]
output io_schedule_bits_reload, // @[MSHR.scala:86:14]
input io_sinkc_valid, // @[MSHR.scala:86:14]
input io_sinkc_bits_last, // @[MSHR.scala:86:14]
input [9:0] io_sinkc_bits_set, // @[MSHR.scala:86:14]
input [12:0] io_sinkc_bits_tag, // @[MSHR.scala:86:14]
input [5:0] io_sinkc_bits_source, // @[MSHR.scala:86:14]
input [2:0] io_sinkc_bits_param, // @[MSHR.scala:86:14]
input io_sinkc_bits_data, // @[MSHR.scala:86:14]
input io_sinkd_valid, // @[MSHR.scala:86:14]
input io_sinkd_bits_last, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_opcode, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_param, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_source, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_sink, // @[MSHR.scala:86:14]
input io_sinkd_bits_denied, // @[MSHR.scala:86:14]
input io_sinke_valid, // @[MSHR.scala:86:14]
input [2:0] io_sinke_bits_sink, // @[MSHR.scala:86:14]
input [9:0] io_nestedwb_set, // @[MSHR.scala:86:14]
input [12:0] io_nestedwb_tag, // @[MSHR.scala:86:14]
input io_nestedwb_b_toN, // @[MSHR.scala:86:14]
input io_nestedwb_b_toB, // @[MSHR.scala:86:14]
input io_nestedwb_b_clr_dirty, // @[MSHR.scala:86:14]
input io_nestedwb_c_set_dirty // @[MSHR.scala:86:14]
);
wire [12:0] final_meta_writeback_tag; // @[MSHR.scala:215:38]
wire final_meta_writeback_clients; // @[MSHR.scala:215:38]
wire [1:0] final_meta_writeback_state; // @[MSHR.scala:215:38]
wire final_meta_writeback_dirty; // @[MSHR.scala:215:38]
wire io_allocate_valid_0 = io_allocate_valid; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_0_0 = io_allocate_bits_prio_0; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_1_0 = io_allocate_bits_prio_1; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_2_0 = io_allocate_bits_prio_2; // @[MSHR.scala:84:7]
wire io_allocate_bits_control_0 = io_allocate_bits_control; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_opcode_0 = io_allocate_bits_opcode; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_param_0 = io_allocate_bits_param; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_size_0 = io_allocate_bits_size; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_source_0 = io_allocate_bits_source; // @[MSHR.scala:84:7]
wire [12:0] io_allocate_bits_tag_0 = io_allocate_bits_tag; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_offset_0 = io_allocate_bits_offset; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_put_0 = io_allocate_bits_put; // @[MSHR.scala:84:7]
wire [9:0] io_allocate_bits_set_0 = io_allocate_bits_set; // @[MSHR.scala:84:7]
wire io_allocate_bits_repeat_0 = io_allocate_bits_repeat; // @[MSHR.scala:84:7]
wire io_directory_valid_0 = io_directory_valid; // @[MSHR.scala:84:7]
wire io_directory_bits_dirty_0 = io_directory_bits_dirty; // @[MSHR.scala:84:7]
wire [1:0] io_directory_bits_state_0 = io_directory_bits_state; // @[MSHR.scala:84:7]
wire io_directory_bits_clients_0 = io_directory_bits_clients; // @[MSHR.scala:84:7]
wire [12:0] io_directory_bits_tag_0 = io_directory_bits_tag; // @[MSHR.scala:84:7]
wire io_directory_bits_hit_0 = io_directory_bits_hit; // @[MSHR.scala:84:7]
wire [2:0] io_directory_bits_way_0 = io_directory_bits_way; // @[MSHR.scala:84:7]
wire io_schedule_ready_0 = io_schedule_ready; // @[MSHR.scala:84:7]
wire io_sinkc_valid_0 = io_sinkc_valid; // @[MSHR.scala:84:7]
wire io_sinkc_bits_last_0 = io_sinkc_bits_last; // @[MSHR.scala:84:7]
wire [9:0] io_sinkc_bits_set_0 = io_sinkc_bits_set; // @[MSHR.scala:84:7]
wire [12:0] io_sinkc_bits_tag_0 = io_sinkc_bits_tag; // @[MSHR.scala:84:7]
wire [5:0] io_sinkc_bits_source_0 = io_sinkc_bits_source; // @[MSHR.scala:84:7]
wire [2:0] io_sinkc_bits_param_0 = io_sinkc_bits_param; // @[MSHR.scala:84:7]
wire io_sinkc_bits_data_0 = io_sinkc_bits_data; // @[MSHR.scala:84:7]
wire io_sinkd_valid_0 = io_sinkd_valid; // @[MSHR.scala:84:7]
wire io_sinkd_bits_last_0 = io_sinkd_bits_last; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_opcode_0 = io_sinkd_bits_opcode; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_param_0 = io_sinkd_bits_param; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_source_0 = io_sinkd_bits_source; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_sink_0 = io_sinkd_bits_sink; // @[MSHR.scala:84:7]
wire io_sinkd_bits_denied_0 = io_sinkd_bits_denied; // @[MSHR.scala:84:7]
wire io_sinke_valid_0 = io_sinke_valid; // @[MSHR.scala:84:7]
wire [2:0] io_sinke_bits_sink_0 = io_sinke_bits_sink; // @[MSHR.scala:84:7]
wire [9:0] io_nestedwb_set_0 = io_nestedwb_set; // @[MSHR.scala:84:7]
wire [12:0] io_nestedwb_tag_0 = io_nestedwb_tag; // @[MSHR.scala:84:7]
wire io_nestedwb_b_toN_0 = io_nestedwb_b_toN; // @[MSHR.scala:84:7]
wire io_nestedwb_b_toB_0 = io_nestedwb_b_toB; // @[MSHR.scala:84:7]
wire io_nestedwb_b_clr_dirty_0 = io_nestedwb_b_clr_dirty; // @[MSHR.scala:84:7]
wire io_nestedwb_c_set_dirty_0 = io_nestedwb_c_set_dirty; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_a_bits_source = 3'h0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_source = 3'h0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_sink = 3'h0; // @[MSHR.scala:84:7]
wire io_schedule_bits_x_bits_fail = 1'h0; // @[MSHR.scala:84:7]
wire _io_schedule_bits_c_valid_T_2 = 1'h0; // @[MSHR.scala:186:68]
wire _io_schedule_bits_c_valid_T_3 = 1'h0; // @[MSHR.scala:186:80]
wire invalid_dirty = 1'h0; // @[MSHR.scala:268:21]
wire invalid_clients = 1'h0; // @[MSHR.scala:268:21]
wire _excluded_client_T_7 = 1'h0; // @[Parameters.scala:279:137]
wire _after_T_4 = 1'h0; // @[MSHR.scala:323:11]
wire _new_skipProbe_T_6 = 1'h0; // @[Parameters.scala:279:137]
wire _prior_T_4 = 1'h0; // @[MSHR.scala:323:11]
wire [12:0] invalid_tag = 13'h0; // @[MSHR.scala:268:21]
wire [1:0] invalid_state = 2'h0; // @[MSHR.scala:268:21]
wire [1:0] _final_meta_writeback_state_T_11 = 2'h1; // @[MSHR.scala:240:70]
wire allocate_as_full_prio_0 = io_allocate_bits_prio_0_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_prio_1 = io_allocate_bits_prio_1_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_prio_2 = io_allocate_bits_prio_2_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_control = io_allocate_bits_control_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_opcode = io_allocate_bits_opcode_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_param = io_allocate_bits_param_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_size = io_allocate_bits_size_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_source = io_allocate_bits_source_0; // @[MSHR.scala:84:7, :504:34]
wire [12:0] allocate_as_full_tag = io_allocate_bits_tag_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_offset = io_allocate_bits_offset_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_put = io_allocate_bits_put_0; // @[MSHR.scala:84:7, :504:34]
wire [9:0] allocate_as_full_set = io_allocate_bits_set_0; // @[MSHR.scala:84:7, :504:34]
wire _io_status_bits_blockB_T_8; // @[MSHR.scala:168:40]
wire _io_status_bits_nestB_T_4; // @[MSHR.scala:169:93]
wire _io_status_bits_blockC_T; // @[MSHR.scala:172:28]
wire _io_status_bits_nestC_T_5; // @[MSHR.scala:173:39]
wire _io_schedule_valid_T_5; // @[MSHR.scala:193:105]
wire _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:184:55]
wire _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:283:91]
wire _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:185:41]
wire [2:0] _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:286:41]
wire [12:0] _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:287:41]
wire _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:289:51]
wire _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:186:64]
wire [2:0] _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:290:41]
wire [2:0] _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:291:41]
wire _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:187:57]
wire [2:0] _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:298:41]
wire _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:188:43]
wire _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:189:40]
wire _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:190:66]
wire _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:310:41]
wire [1:0] _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:310:41]
wire _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:310:41]
wire [12:0] _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:310:41]
wire no_wait; // @[MSHR.scala:183:83]
wire [9:0] io_status_bits_set_0; // @[MSHR.scala:84:7]
wire [12:0] io_status_bits_tag_0; // @[MSHR.scala:84:7]
wire [2:0] io_status_bits_way_0; // @[MSHR.scala:84:7]
wire io_status_bits_blockB_0; // @[MSHR.scala:84:7]
wire io_status_bits_nestB_0; // @[MSHR.scala:84:7]
wire io_status_bits_blockC_0; // @[MSHR.scala:84:7]
wire io_status_bits_nestC_0; // @[MSHR.scala:84:7]
wire io_status_valid_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_a_bits_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_a_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_a_bits_param_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_a_bits_block_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_a_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_b_bits_param_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_b_bits_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_b_bits_set_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_b_bits_clients_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_opcode_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_param_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_c_bits_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_c_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_c_bits_dirty_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_0_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_1_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_2_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_control_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_opcode_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_param_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_size_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_source_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_d_bits_tag_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_offset_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_put_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_d_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_bad_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_e_bits_sink_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_bits_data_dirty_0; // @[MSHR.scala:84:7]
wire [1:0] io_schedule_bits_dir_bits_data_state_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_bits_data_clients_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_dir_bits_data_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_dir_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_dir_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_reload_0; // @[MSHR.scala:84:7]
wire io_schedule_valid_0; // @[MSHR.scala:84:7]
reg request_valid; // @[MSHR.scala:97:30]
assign io_status_valid_0 = request_valid; // @[MSHR.scala:84:7, :97:30]
reg request_prio_0; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_0_0 = request_prio_0; // @[MSHR.scala:84:7, :98:20]
reg request_prio_1; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_1_0 = request_prio_1; // @[MSHR.scala:84:7, :98:20]
reg request_prio_2; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_2_0 = request_prio_2; // @[MSHR.scala:84:7, :98:20]
reg request_control; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_control_0 = request_control; // @[MSHR.scala:84:7, :98:20]
reg [2:0] request_opcode; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_opcode_0 = request_opcode; // @[MSHR.scala:84:7, :98:20]
reg [2:0] request_param; // @[MSHR.scala:98:20]
reg [2:0] request_size; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_size_0 = request_size; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_source; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_source_0 = request_source; // @[MSHR.scala:84:7, :98:20]
reg [12:0] request_tag; // @[MSHR.scala:98:20]
assign io_status_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_a_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_d_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_offset; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_offset_0 = request_offset; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_put; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_put_0 = request_put; // @[MSHR.scala:84:7, :98:20]
reg [9:0] request_set; // @[MSHR.scala:98:20]
assign io_status_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_a_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_b_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_c_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_d_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_dir_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
reg meta_valid; // @[MSHR.scala:99:27]
reg meta_dirty; // @[MSHR.scala:100:17]
assign io_schedule_bits_c_bits_dirty_0 = meta_dirty; // @[MSHR.scala:84:7, :100:17]
reg [1:0] meta_state; // @[MSHR.scala:100:17]
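  // Editorial note (not emitted by the compiler): meta_state holds the 2-bit directory state from
  // the Chisel source above (BRANCH = 1, TRUNK = 2, TIP = 3, all-zero = invalid); the comparisons
  // below such as meta_state == 2'h2, &meta_state and ~(|meta_state) test for those encodings.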
reg meta_clients; // @[MSHR.scala:100:17]
wire _meta_no_clients_T = meta_clients; // @[MSHR.scala:100:17, :220:39]
wire evict_c = meta_clients; // @[MSHR.scala:100:17, :315:27]
wire before_c = meta_clients; // @[MSHR.scala:100:17, :315:27]
reg [12:0] meta_tag; // @[MSHR.scala:100:17]
assign io_schedule_bits_c_bits_tag_0 = meta_tag; // @[MSHR.scala:84:7, :100:17]
reg meta_hit; // @[MSHR.scala:100:17]
reg [2:0] meta_way; // @[MSHR.scala:100:17]
assign io_status_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_c_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_d_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_dir_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
wire [2:0] final_meta_writeback_way = meta_way; // @[MSHR.scala:100:17, :215:38]
reg s_rprobe; // @[MSHR.scala:121:33]
reg w_rprobeackfirst; // @[MSHR.scala:122:33]
reg w_rprobeacklast; // @[MSHR.scala:123:33]
reg s_release; // @[MSHR.scala:124:33]
reg w_releaseack; // @[MSHR.scala:125:33]
reg s_pprobe; // @[MSHR.scala:126:33]
reg s_acquire; // @[MSHR.scala:127:33]
reg s_flush; // @[MSHR.scala:128:33]
reg w_grantfirst; // @[MSHR.scala:129:33]
reg w_grantlast; // @[MSHR.scala:130:33]
reg w_grant; // @[MSHR.scala:131:33]
reg w_pprobeackfirst; // @[MSHR.scala:132:33]
reg w_pprobeacklast; // @[MSHR.scala:133:33]
reg w_pprobeack; // @[MSHR.scala:134:33]
reg s_grantack; // @[MSHR.scala:136:33]
reg s_execute; // @[MSHR.scala:137:33]
reg w_grantack; // @[MSHR.scala:138:33]
reg s_writeback; // @[MSHR.scala:139:33]
reg [2:0] sink; // @[MSHR.scala:147:17]
assign io_schedule_bits_e_bits_sink_0 = sink; // @[MSHR.scala:84:7, :147:17]
reg gotT; // @[MSHR.scala:148:17]
reg bad_grant; // @[MSHR.scala:149:22]
assign io_schedule_bits_d_bits_bad_0 = bad_grant; // @[MSHR.scala:84:7, :149:22]
reg probes_done; // @[MSHR.scala:150:24]
reg probes_toN; // @[MSHR.scala:151:23]
reg probes_noT; // @[MSHR.scala:152:23]
wire _io_status_bits_blockB_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28]
wire _io_status_bits_blockB_T_1 = ~w_releaseack; // @[MSHR.scala:125:33, :168:45]
wire _io_status_bits_blockB_T_2 = ~w_rprobeacklast; // @[MSHR.scala:123:33, :168:62]
wire _io_status_bits_blockB_T_3 = _io_status_bits_blockB_T_1 | _io_status_bits_blockB_T_2; // @[MSHR.scala:168:{45,59,62}]
wire _io_status_bits_blockB_T_4 = ~w_pprobeacklast; // @[MSHR.scala:133:33, :168:82]
wire _io_status_bits_blockB_T_5 = _io_status_bits_blockB_T_3 | _io_status_bits_blockB_T_4; // @[MSHR.scala:168:{59,79,82}]
wire _io_status_bits_blockB_T_6 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103]
wire _io_status_bits_blockB_T_7 = _io_status_bits_blockB_T_5 & _io_status_bits_blockB_T_6; // @[MSHR.scala:168:{79,100,103}]
assign _io_status_bits_blockB_T_8 = _io_status_bits_blockB_T | _io_status_bits_blockB_T_7; // @[MSHR.scala:168:{28,40,100}]
assign io_status_bits_blockB_0 = _io_status_bits_blockB_T_8; // @[MSHR.scala:84:7, :168:40]
wire _io_status_bits_nestB_T = meta_valid & w_releaseack; // @[MSHR.scala:99:27, :125:33, :169:39]
wire _io_status_bits_nestB_T_1 = _io_status_bits_nestB_T & w_rprobeacklast; // @[MSHR.scala:123:33, :169:{39,55}]
wire _io_status_bits_nestB_T_2 = _io_status_bits_nestB_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :169:{55,74}]
wire _io_status_bits_nestB_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :169:96]
assign _io_status_bits_nestB_T_4 = _io_status_bits_nestB_T_2 & _io_status_bits_nestB_T_3; // @[MSHR.scala:169:{74,93,96}]
assign io_status_bits_nestB_0 = _io_status_bits_nestB_T_4; // @[MSHR.scala:84:7, :169:93]
assign _io_status_bits_blockC_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28, :172:28]
assign io_status_bits_blockC_0 = _io_status_bits_blockC_T; // @[MSHR.scala:84:7, :172:28]
wire _io_status_bits_nestC_T = ~w_rprobeackfirst; // @[MSHR.scala:122:33, :173:43]
wire _io_status_bits_nestC_T_1 = ~w_pprobeackfirst; // @[MSHR.scala:132:33, :173:64]
wire _io_status_bits_nestC_T_2 = _io_status_bits_nestC_T | _io_status_bits_nestC_T_1; // @[MSHR.scala:173:{43,61,64}]
wire _io_status_bits_nestC_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :173:85]
wire _io_status_bits_nestC_T_4 = _io_status_bits_nestC_T_2 | _io_status_bits_nestC_T_3; // @[MSHR.scala:173:{61,82,85}]
assign _io_status_bits_nestC_T_5 = meta_valid & _io_status_bits_nestC_T_4; // @[MSHR.scala:99:27, :173:{39,82}]
assign io_status_bits_nestC_0 = _io_status_bits_nestC_T_5; // @[MSHR.scala:84:7, :173:39]
wire _no_wait_T = w_rprobeacklast & w_releaseack; // @[MSHR.scala:123:33, :125:33, :183:33]
wire _no_wait_T_1 = _no_wait_T & w_grantlast; // @[MSHR.scala:130:33, :183:{33,49}]
wire _no_wait_T_2 = _no_wait_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :183:{49,64}]
assign no_wait = _no_wait_T_2 & w_grantack; // @[MSHR.scala:138:33, :183:{64,83}]
assign io_schedule_bits_reload_0 = no_wait; // @[MSHR.scala:84:7, :183:83]
wire _io_schedule_bits_a_valid_T = ~s_acquire; // @[MSHR.scala:127:33, :184:31]
wire _io_schedule_bits_a_valid_T_1 = _io_schedule_bits_a_valid_T & s_release; // @[MSHR.scala:124:33, :184:{31,42}]
assign _io_schedule_bits_a_valid_T_2 = _io_schedule_bits_a_valid_T_1 & s_pprobe; // @[MSHR.scala:126:33, :184:{42,55}]
assign io_schedule_bits_a_valid_0 = _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:84:7, :184:55]
wire _io_schedule_bits_b_valid_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31]
wire _io_schedule_bits_b_valid_T_1 = ~s_pprobe; // @[MSHR.scala:126:33, :185:44]
assign _io_schedule_bits_b_valid_T_2 = _io_schedule_bits_b_valid_T | _io_schedule_bits_b_valid_T_1; // @[MSHR.scala:185:{31,41,44}]
assign io_schedule_bits_b_valid_0 = _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:84:7, :185:41]
wire _io_schedule_bits_c_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32]
wire _io_schedule_bits_c_valid_T_1 = _io_schedule_bits_c_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :186:{32,43}]
assign _io_schedule_bits_c_valid_T_4 = _io_schedule_bits_c_valid_T_1; // @[MSHR.scala:186:{43,64}]
assign io_schedule_bits_c_valid_0 = _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:84:7, :186:64]
wire _io_schedule_bits_d_valid_T = ~s_execute; // @[MSHR.scala:137:33, :187:31]
wire _io_schedule_bits_d_valid_T_1 = _io_schedule_bits_d_valid_T & w_pprobeack; // @[MSHR.scala:134:33, :187:{31,42}]
assign _io_schedule_bits_d_valid_T_2 = _io_schedule_bits_d_valid_T_1 & w_grant; // @[MSHR.scala:131:33, :187:{42,57}]
assign io_schedule_bits_d_valid_0 = _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:84:7, :187:57]
wire _io_schedule_bits_e_valid_T = ~s_grantack; // @[MSHR.scala:136:33, :188:31]
assign _io_schedule_bits_e_valid_T_1 = _io_schedule_bits_e_valid_T & w_grantfirst; // @[MSHR.scala:129:33, :188:{31,43}]
assign io_schedule_bits_e_valid_0 = _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:84:7, :188:43]
wire _io_schedule_bits_x_valid_T = ~s_flush; // @[MSHR.scala:128:33, :189:31]
assign _io_schedule_bits_x_valid_T_1 = _io_schedule_bits_x_valid_T & w_releaseack; // @[MSHR.scala:125:33, :189:{31,40}]
assign io_schedule_bits_x_valid_0 = _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:84:7, :189:40]
wire _io_schedule_bits_dir_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :190:34]
wire _io_schedule_bits_dir_valid_T_1 = _io_schedule_bits_dir_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :190:{34,45}]
wire _io_schedule_bits_dir_valid_T_2 = ~s_writeback; // @[MSHR.scala:139:33, :190:70]
wire _io_schedule_bits_dir_valid_T_3 = _io_schedule_bits_dir_valid_T_2 & no_wait; // @[MSHR.scala:183:83, :190:{70,83}]
assign _io_schedule_bits_dir_valid_T_4 = _io_schedule_bits_dir_valid_T_1 | _io_schedule_bits_dir_valid_T_3; // @[MSHR.scala:190:{45,66,83}]
assign io_schedule_bits_dir_valid_0 = _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:84:7, :190:66]
wire _io_schedule_valid_T = io_schedule_bits_a_valid_0 | io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7, :192:49]
wire _io_schedule_valid_T_1 = _io_schedule_valid_T | io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7, :192:{49,77}]
wire _io_schedule_valid_T_2 = _io_schedule_valid_T_1 | io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7, :192:{77,105}]
wire _io_schedule_valid_T_3 = _io_schedule_valid_T_2 | io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7, :192:105, :193:49]
wire _io_schedule_valid_T_4 = _io_schedule_valid_T_3 | io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7, :193:{49,77}]
assign _io_schedule_valid_T_5 = _io_schedule_valid_T_4 | io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7, :193:{77,105}]
assign io_schedule_valid_0 = _io_schedule_valid_T_5; // @[MSHR.scala:84:7, :193:105]
wire _io_schedule_bits_dir_bits_data_WIRE_dirty = final_meta_writeback_dirty; // @[MSHR.scala:215:38, :310:71]
wire [1:0] _io_schedule_bits_dir_bits_data_WIRE_state = final_meta_writeback_state; // @[MSHR.scala:215:38, :310:71]
wire _io_schedule_bits_dir_bits_data_WIRE_clients = final_meta_writeback_clients; // @[MSHR.scala:215:38, :310:71]
wire after_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27]
wire prior_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27]
wire [12:0] _io_schedule_bits_dir_bits_data_WIRE_tag = final_meta_writeback_tag; // @[MSHR.scala:215:38, :310:71]
wire final_meta_writeback_hit; // @[MSHR.scala:215:38]
wire req_clientBit = request_source == 6'h21; // @[Parameters.scala:46:9]
wire _req_needT_T = request_opcode[2]; // @[Parameters.scala:269:12]
wire _final_meta_writeback_dirty_T_3 = request_opcode[2]; // @[Parameters.scala:269:12]
wire _req_needT_T_1 = ~_req_needT_T; // @[Parameters.scala:269:{5,12}]
wire _GEN = request_opcode == 3'h5; // @[Parameters.scala:270:13]
wire _req_needT_T_2; // @[Parameters.scala:270:13]
assign _req_needT_T_2 = _GEN; // @[Parameters.scala:270:13]
wire _excluded_client_T_6; // @[Parameters.scala:279:117]
assign _excluded_client_T_6 = _GEN; // @[Parameters.scala:270:13, :279:117]
wire _GEN_0 = request_param == 3'h1; // @[Parameters.scala:270:42]
wire _req_needT_T_3; // @[Parameters.scala:270:42]
assign _req_needT_T_3 = _GEN_0; // @[Parameters.scala:270:42]
wire _final_meta_writeback_clients_T; // @[Parameters.scala:282:11]
assign _final_meta_writeback_clients_T = _GEN_0; // @[Parameters.scala:270:42, :282:11]
wire _io_schedule_bits_d_bits_param_T_7; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_7 = _GEN_0; // @[Parameters.scala:270:42]
wire _req_needT_T_4 = _req_needT_T_2 & _req_needT_T_3; // @[Parameters.scala:270:{13,33,42}]
wire _req_needT_T_5 = _req_needT_T_1 | _req_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33]
wire _GEN_1 = request_opcode == 3'h6; // @[Parameters.scala:271:14]
wire _req_needT_T_6; // @[Parameters.scala:271:14]
assign _req_needT_T_6 = _GEN_1; // @[Parameters.scala:271:14]
wire _req_acquire_T; // @[MSHR.scala:219:36]
assign _req_acquire_T = _GEN_1; // @[Parameters.scala:271:14]
wire _excluded_client_T_1; // @[Parameters.scala:279:12]
assign _excluded_client_T_1 = _GEN_1; // @[Parameters.scala:271:14, :279:12]
wire _req_needT_T_7 = &request_opcode; // @[Parameters.scala:271:52]
wire _req_needT_T_8 = _req_needT_T_6 | _req_needT_T_7; // @[Parameters.scala:271:{14,42,52}]
wire _req_needT_T_9 = |request_param; // @[Parameters.scala:271:89]
wire _req_needT_T_10 = _req_needT_T_8 & _req_needT_T_9; // @[Parameters.scala:271:{42,80,89}]
wire req_needT = _req_needT_T_5 | _req_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80]
wire _req_acquire_T_1 = &request_opcode; // @[Parameters.scala:271:52]
wire req_acquire = _req_acquire_T | _req_acquire_T_1; // @[MSHR.scala:219:{36,53,71}]
wire meta_no_clients = ~_meta_no_clients_T; // @[MSHR.scala:220:{25,39}]
wire _req_promoteT_T = &meta_state; // @[MSHR.scala:100:17, :221:81]
wire _req_promoteT_T_1 = meta_no_clients & _req_promoteT_T; // @[MSHR.scala:220:25, :221:{67,81}]
wire _req_promoteT_T_2 = meta_hit ? _req_promoteT_T_1 : gotT; // @[MSHR.scala:100:17, :148:17, :221:{40,67}]
wire req_promoteT = req_acquire & _req_promoteT_T_2; // @[MSHR.scala:219:53, :221:{34,40}]
wire _final_meta_writeback_dirty_T = request_opcode[0]; // @[MSHR.scala:98:20, :224:65]
wire _final_meta_writeback_dirty_T_1 = meta_dirty | _final_meta_writeback_dirty_T; // @[MSHR.scala:100:17, :224:{48,65}]
wire _final_meta_writeback_state_T = request_param != 3'h3; // @[MSHR.scala:98:20, :225:55]
wire _GEN_2 = meta_state == 2'h2; // @[MSHR.scala:100:17, :225:78]
wire _final_meta_writeback_state_T_1; // @[MSHR.scala:225:78]
assign _final_meta_writeback_state_T_1 = _GEN_2; // @[MSHR.scala:225:78]
wire _final_meta_writeback_state_T_12; // @[MSHR.scala:240:70]
assign _final_meta_writeback_state_T_12 = _GEN_2; // @[MSHR.scala:225:78, :240:70]
wire _evict_T_2; // @[MSHR.scala:317:26]
assign _evict_T_2 = _GEN_2; // @[MSHR.scala:225:78, :317:26]
wire _before_T_1; // @[MSHR.scala:317:26]
assign _before_T_1 = _GEN_2; // @[MSHR.scala:225:78, :317:26]
wire _final_meta_writeback_state_T_2 = _final_meta_writeback_state_T & _final_meta_writeback_state_T_1; // @[MSHR.scala:225:{55,64,78}]
wire [1:0] _final_meta_writeback_state_T_3 = _final_meta_writeback_state_T_2 ? 2'h3 : meta_state; // @[MSHR.scala:100:17, :225:{40,64}]
wire _GEN_3 = request_param == 3'h2; // @[Parameters.scala:282:43]
wire _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:43]
assign _final_meta_writeback_clients_T_1 = _GEN_3; // @[Parameters.scala:282:43]
wire _io_schedule_bits_d_bits_param_T_5; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_5 = _GEN_3; // @[Parameters.scala:282:43]
wire _final_meta_writeback_clients_T_2 = _final_meta_writeback_clients_T | _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:{11,34,43}]
wire _final_meta_writeback_clients_T_3 = request_param == 3'h5; // @[Parameters.scala:282:75]
wire _final_meta_writeback_clients_T_4 = _final_meta_writeback_clients_T_2 | _final_meta_writeback_clients_T_3; // @[Parameters.scala:282:{34,66,75}]
wire _final_meta_writeback_clients_T_5 = _final_meta_writeback_clients_T_4 & req_clientBit; // @[Parameters.scala:46:9]
wire _final_meta_writeback_clients_T_6 = ~_final_meta_writeback_clients_T_5; // @[MSHR.scala:226:{52,56}]
wire _final_meta_writeback_clients_T_7 = meta_clients & _final_meta_writeback_clients_T_6; // @[MSHR.scala:100:17, :226:{50,52}]
wire _final_meta_writeback_clients_T_8 = ~probes_toN; // @[MSHR.scala:151:23, :232:54]
wire _final_meta_writeback_clients_T_9 = meta_clients & _final_meta_writeback_clients_T_8; // @[MSHR.scala:100:17, :232:{52,54}]
wire _final_meta_writeback_dirty_T_2 = meta_hit & meta_dirty; // @[MSHR.scala:100:17, :236:45]
wire _final_meta_writeback_dirty_T_4 = ~_final_meta_writeback_dirty_T_3; // @[MSHR.scala:236:{63,78}]
wire _final_meta_writeback_dirty_T_5 = _final_meta_writeback_dirty_T_2 | _final_meta_writeback_dirty_T_4; // @[MSHR.scala:236:{45,60,63}]
wire [1:0] _GEN_4 = {1'h1, ~req_acquire}; // @[MSHR.scala:219:53, :238:40]
wire [1:0] _final_meta_writeback_state_T_4; // @[MSHR.scala:238:40]
assign _final_meta_writeback_state_T_4 = _GEN_4; // @[MSHR.scala:238:40]
wire [1:0] _final_meta_writeback_state_T_6; // @[MSHR.scala:239:65]
assign _final_meta_writeback_state_T_6 = _GEN_4; // @[MSHR.scala:238:40, :239:65]
wire _final_meta_writeback_state_T_5 = ~meta_hit; // @[MSHR.scala:100:17, :239:41]
wire [1:0] _final_meta_writeback_state_T_7 = gotT ? _final_meta_writeback_state_T_6 : 2'h1; // @[MSHR.scala:148:17, :239:{55,65}]
wire _final_meta_writeback_state_T_8 = meta_no_clients & req_acquire; // @[MSHR.scala:219:53, :220:25, :244:72]
wire [1:0] _final_meta_writeback_state_T_9 = {1'h1, ~_final_meta_writeback_state_T_8}; // @[MSHR.scala:244:{55,72}]
wire _GEN_5 = meta_state == 2'h1; // @[MSHR.scala:100:17, :240:70]
wire _final_meta_writeback_state_T_10; // @[MSHR.scala:240:70]
assign _final_meta_writeback_state_T_10 = _GEN_5; // @[MSHR.scala:240:70]
wire _io_schedule_bits_c_bits_param_T; // @[MSHR.scala:291:53]
assign _io_schedule_bits_c_bits_param_T = _GEN_5; // @[MSHR.scala:240:70, :291:53]
wire _evict_T_1; // @[MSHR.scala:317:26]
assign _evict_T_1 = _GEN_5; // @[MSHR.scala:240:70, :317:26]
wire _before_T; // @[MSHR.scala:317:26]
assign _before_T = _GEN_5; // @[MSHR.scala:240:70, :317:26]
wire [1:0] _final_meta_writeback_state_T_13 = {_final_meta_writeback_state_T_12, 1'h1}; // @[MSHR.scala:240:70]
wire _final_meta_writeback_state_T_14 = &meta_state; // @[MSHR.scala:100:17, :221:81, :240:70]
wire [1:0] _final_meta_writeback_state_T_15 = _final_meta_writeback_state_T_14 ? _final_meta_writeback_state_T_9 : _final_meta_writeback_state_T_13; // @[MSHR.scala:240:70, :244:55]
wire [1:0] _final_meta_writeback_state_T_16 = _final_meta_writeback_state_T_5 ? _final_meta_writeback_state_T_7 : _final_meta_writeback_state_T_15; // @[MSHR.scala:239:{40,41,55}, :240:70]
wire [1:0] _final_meta_writeback_state_T_17 = req_needT ? _final_meta_writeback_state_T_4 : _final_meta_writeback_state_T_16; // @[Parameters.scala:270:70]
wire _final_meta_writeback_clients_T_10 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :245:66]
wire _final_meta_writeback_clients_T_11 = meta_clients & _final_meta_writeback_clients_T_10; // @[MSHR.scala:100:17, :245:{64,66}]
wire _final_meta_writeback_clients_T_12 = meta_hit & _final_meta_writeback_clients_T_11; // @[MSHR.scala:100:17, :245:{40,64}]
wire _final_meta_writeback_clients_T_13 = req_acquire & req_clientBit; // @[Parameters.scala:46:9]
wire _final_meta_writeback_clients_T_14 = _final_meta_writeback_clients_T_12 | _final_meta_writeback_clients_T_13; // @[MSHR.scala:245:{40,84}, :246:40]
assign final_meta_writeback_tag = request_prio_2 | request_control ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :215:38, :223:52, :228:53, :247:30]
wire _final_meta_writeback_clients_T_15 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :258:54]
wire _final_meta_writeback_clients_T_16 = meta_clients & _final_meta_writeback_clients_T_15; // @[MSHR.scala:100:17, :258:{52,54}]
assign final_meta_writeback_hit = bad_grant ? meta_hit : request_prio_2 | ~request_control; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :227:34, :228:53, :234:30, :248:30, :251:20, :252:21]
assign final_meta_writeback_dirty = ~bad_grant & (request_prio_2 ? _final_meta_writeback_dirty_T_1 : request_control ? ~meta_hit & meta_dirty : _final_meta_writeback_dirty_T_5); // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :224:{34,48}, :228:53, :229:21, :230:36, :236:{32,60}, :251:20, :252:21]
assign final_meta_writeback_state = bad_grant ? {1'h0, meta_hit} : request_prio_2 ? _final_meta_writeback_state_T_3 : request_control ? (meta_hit ? 2'h0 : meta_state) : _final_meta_writeback_state_T_17; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :225:{34,40}, :228:53, :229:21, :231:36, :237:{32,38}, :251:20, :252:21, :257:36, :263:36]
assign final_meta_writeback_clients = bad_grant ? meta_hit & _final_meta_writeback_clients_T_16 : request_prio_2 ? _final_meta_writeback_clients_T_7 : request_control ? (meta_hit ? _final_meta_writeback_clients_T_9 : meta_clients) : _final_meta_writeback_clients_T_14; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :226:{34,50}, :228:53, :229:21, :232:{36,52}, :245:{34,84}, :251:20, :252:21, :258:{36,52}, :264:36]
wire _honour_BtoT_T = meta_clients & req_clientBit; // @[Parameters.scala:46:9]
wire _honour_BtoT_T_1 = _honour_BtoT_T; // @[MSHR.scala:276:{47,64}]
wire honour_BtoT = meta_hit & _honour_BtoT_T_1; // @[MSHR.scala:100:17, :276:{30,64}]
wire _excluded_client_T = meta_hit & request_prio_0; // @[MSHR.scala:98:20, :100:17, :279:38]
wire _excluded_client_T_2 = &request_opcode; // @[Parameters.scala:271:52, :279:50]
wire _excluded_client_T_3 = _excluded_client_T_1 | _excluded_client_T_2; // @[Parameters.scala:279:{12,40,50}]
wire _excluded_client_T_4 = request_opcode == 3'h4; // @[Parameters.scala:279:87]
wire _excluded_client_T_5 = _excluded_client_T_3 | _excluded_client_T_4; // @[Parameters.scala:279:{40,77,87}]
wire _excluded_client_T_8 = _excluded_client_T_5; // @[Parameters.scala:279:{77,106}]
wire _excluded_client_T_9 = _excluded_client_T & _excluded_client_T_8; // @[Parameters.scala:279:106]
wire excluded_client = _excluded_client_T_9 & req_clientBit; // @[Parameters.scala:46:9]
wire [1:0] _io_schedule_bits_a_bits_param_T = meta_hit ? 2'h2 : 2'h1; // @[MSHR.scala:100:17, :282:56]
wire [1:0] _io_schedule_bits_a_bits_param_T_1 = req_needT ? _io_schedule_bits_a_bits_param_T : 2'h0; // @[Parameters.scala:270:70]
assign io_schedule_bits_a_bits_param_0 = {1'h0, _io_schedule_bits_a_bits_param_T_1}; // @[MSHR.scala:84:7, :282:{35,41}]
wire _io_schedule_bits_a_bits_block_T = request_size != 3'h6; // @[MSHR.scala:98:20, :283:51]
wire _io_schedule_bits_a_bits_block_T_1 = request_opcode == 3'h0; // @[MSHR.scala:98:20, :284:55]
wire _io_schedule_bits_a_bits_block_T_2 = &request_opcode; // @[Parameters.scala:271:52]
wire _io_schedule_bits_a_bits_block_T_3 = _io_schedule_bits_a_bits_block_T_1 | _io_schedule_bits_a_bits_block_T_2; // @[MSHR.scala:284:{55,71,89}]
wire _io_schedule_bits_a_bits_block_T_4 = ~_io_schedule_bits_a_bits_block_T_3; // @[MSHR.scala:284:{38,71}]
assign _io_schedule_bits_a_bits_block_T_5 = _io_schedule_bits_a_bits_block_T | _io_schedule_bits_a_bits_block_T_4; // @[MSHR.scala:283:{51,91}, :284:38]
assign io_schedule_bits_a_bits_block_0 = _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:84:7, :283:91]
wire _io_schedule_bits_b_bits_param_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :286:42]
wire [1:0] _io_schedule_bits_b_bits_param_T_1 = req_needT ? 2'h2 : 2'h1; // @[Parameters.scala:270:70]
wire [2:0] _io_schedule_bits_b_bits_param_T_2 = request_prio_1 ? request_param : {1'h0, _io_schedule_bits_b_bits_param_T_1}; // @[MSHR.scala:98:20, :286:{61,97}]
assign _io_schedule_bits_b_bits_param_T_3 = _io_schedule_bits_b_bits_param_T ? 3'h2 : _io_schedule_bits_b_bits_param_T_2; // @[MSHR.scala:286:{41,42,61}]
assign io_schedule_bits_b_bits_param_0 = _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:84:7, :286:41]
wire _io_schedule_bits_b_bits_tag_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :287:42]
assign _io_schedule_bits_b_bits_tag_T_1 = _io_schedule_bits_b_bits_tag_T ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :287:{41,42}]
assign io_schedule_bits_b_bits_tag_0 = _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:84:7, :287:41]
wire _io_schedule_bits_b_bits_clients_T = ~excluded_client; // @[MSHR.scala:279:28, :289:53]
assign _io_schedule_bits_b_bits_clients_T_1 = meta_clients & _io_schedule_bits_b_bits_clients_T; // @[MSHR.scala:100:17, :289:{51,53}]
assign io_schedule_bits_b_bits_clients_0 = _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:84:7, :289:51]
assign _io_schedule_bits_c_bits_opcode_T = {2'h3, meta_dirty}; // @[MSHR.scala:100:17, :290:41]
assign io_schedule_bits_c_bits_opcode_0 = _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:84:7, :290:41]
assign _io_schedule_bits_c_bits_param_T_1 = _io_schedule_bits_c_bits_param_T ? 3'h2 : 3'h1; // @[MSHR.scala:291:{41,53}]
assign io_schedule_bits_c_bits_param_0 = _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:84:7, :291:41]
wire _io_schedule_bits_d_bits_param_T = ~req_acquire; // @[MSHR.scala:219:53, :298:42]
wire [1:0] _io_schedule_bits_d_bits_param_T_1 = {1'h0, req_promoteT}; // @[MSHR.scala:221:34, :300:53]
wire [1:0] _io_schedule_bits_d_bits_param_T_2 = honour_BtoT ? 2'h2 : 2'h1; // @[MSHR.scala:276:30, :301:53]
wire _io_schedule_bits_d_bits_param_T_3 = ~(|request_param); // @[Parameters.scala:271:89]
wire [2:0] _io_schedule_bits_d_bits_param_T_4 = _io_schedule_bits_d_bits_param_T_3 ? {1'h0, _io_schedule_bits_d_bits_param_T_1} : request_param; // @[MSHR.scala:98:20, :299:79, :300:53]
wire [2:0] _io_schedule_bits_d_bits_param_T_6 = _io_schedule_bits_d_bits_param_T_5 ? {1'h0, _io_schedule_bits_d_bits_param_T_2} : _io_schedule_bits_d_bits_param_T_4; // @[MSHR.scala:299:79, :301:53]
wire [2:0] _io_schedule_bits_d_bits_param_T_8 = _io_schedule_bits_d_bits_param_T_7 ? 3'h1 : _io_schedule_bits_d_bits_param_T_6; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_9 = _io_schedule_bits_d_bits_param_T ? request_param : _io_schedule_bits_d_bits_param_T_8; // @[MSHR.scala:98:20, :298:{41,42}, :299:79]
assign io_schedule_bits_d_bits_param_0 = _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:84:7, :298:41]
wire _io_schedule_bits_dir_bits_data_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :310:42]
assign _io_schedule_bits_dir_bits_data_T_1_dirty = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_dirty; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_state = _io_schedule_bits_dir_bits_data_T ? 2'h0 : _io_schedule_bits_dir_bits_data_WIRE_state; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_clients = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_clients; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_tag = _io_schedule_bits_dir_bits_data_T ? 13'h0 : _io_schedule_bits_dir_bits_data_WIRE_tag; // @[MSHR.scala:310:{41,42,71}]
assign io_schedule_bits_dir_bits_data_dirty_0 = _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_state_0 = _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_clients_0 = _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_tag_0 = _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:84:7, :310:41]
wire _evict_T = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :338:32]
wire [3:0] evict; // @[MSHR.scala:314:26]
wire _evict_out_T = ~evict_c; // @[MSHR.scala:315:27, :318:32]
wire [1:0] _GEN_6 = {1'h1, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32]
wire [1:0] _evict_out_T_1; // @[MSHR.scala:319:32]
assign _evict_out_T_1 = _GEN_6; // @[MSHR.scala:319:32]
wire [1:0] _before_out_T_1; // @[MSHR.scala:319:32]
assign _before_out_T_1 = _GEN_6; // @[MSHR.scala:319:32]
wire _evict_T_3 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26]
wire [2:0] _GEN_7 = {2'h2, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:39]
wire [2:0] _evict_out_T_2; // @[MSHR.scala:320:39]
assign _evict_out_T_2 = _GEN_7; // @[MSHR.scala:320:39]
wire [2:0] _before_out_T_2; // @[MSHR.scala:320:39]
assign _before_out_T_2 = _GEN_7; // @[MSHR.scala:320:39]
wire [2:0] _GEN_8 = {2'h3, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:76]
wire [2:0] _evict_out_T_3; // @[MSHR.scala:320:76]
assign _evict_out_T_3 = _GEN_8; // @[MSHR.scala:320:76]
wire [2:0] _before_out_T_3; // @[MSHR.scala:320:76]
assign _before_out_T_3 = _GEN_8; // @[MSHR.scala:320:76]
wire [2:0] _evict_out_T_4 = evict_c ? _evict_out_T_2 : _evict_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _evict_T_4 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26]
wire _evict_T_5 = ~_evict_T; // @[MSHR.scala:323:11, :338:32]
assign evict = _evict_T_5 ? 4'h8 : _evict_T_1 ? {3'h0, _evict_out_T} : _evict_T_2 ? {2'h0, _evict_out_T_1} : _evict_T_3 ? {1'h0, _evict_out_T_4} : {_evict_T_4, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}]
wire [3:0] before_0; // @[MSHR.scala:314:26]
wire _before_out_T = ~before_c; // @[MSHR.scala:315:27, :318:32]
wire _before_T_2 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26]
wire [2:0] _before_out_T_4 = before_c ? _before_out_T_2 : _before_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _before_T_3 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26]
wire _before_T_4 = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :323:11]
assign before_0 = _before_T_4 ? 4'h8 : _before_T ? {3'h0, _before_out_T} : _before_T_1 ? {2'h0, _before_out_T_1} : _before_T_2 ? {1'h0, _before_out_T_4} : {_before_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}]
wire [3:0] after; // @[MSHR.scala:314:26]
wire _GEN_9 = final_meta_writeback_state == 2'h1; // @[MSHR.scala:215:38, :317:26]
wire _after_T; // @[MSHR.scala:317:26]
assign _after_T = _GEN_9; // @[MSHR.scala:317:26]
wire _prior_T; // @[MSHR.scala:317:26]
assign _prior_T = _GEN_9; // @[MSHR.scala:317:26]
wire _after_out_T = ~after_c; // @[MSHR.scala:315:27, :318:32]
wire _GEN_10 = final_meta_writeback_state == 2'h2; // @[MSHR.scala:215:38, :317:26]
wire _after_T_1; // @[MSHR.scala:317:26]
assign _after_T_1 = _GEN_10; // @[MSHR.scala:317:26]
wire _prior_T_1; // @[MSHR.scala:317:26]
assign _prior_T_1 = _GEN_10; // @[MSHR.scala:317:26]
wire [1:0] _GEN_11 = {1'h1, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32]
wire [1:0] _after_out_T_1; // @[MSHR.scala:319:32]
assign _after_out_T_1 = _GEN_11; // @[MSHR.scala:319:32]
wire [1:0] _prior_out_T_1; // @[MSHR.scala:319:32]
assign _prior_out_T_1 = _GEN_11; // @[MSHR.scala:319:32]
wire _after_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26]
wire [2:0] _GEN_12 = {2'h2, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:39]
wire [2:0] _after_out_T_2; // @[MSHR.scala:320:39]
assign _after_out_T_2 = _GEN_12; // @[MSHR.scala:320:39]
wire [2:0] _prior_out_T_2; // @[MSHR.scala:320:39]
assign _prior_out_T_2 = _GEN_12; // @[MSHR.scala:320:39]
wire [2:0] _GEN_13 = {2'h3, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:76]
wire [2:0] _after_out_T_3; // @[MSHR.scala:320:76]
assign _after_out_T_3 = _GEN_13; // @[MSHR.scala:320:76]
wire [2:0] _prior_out_T_3; // @[MSHR.scala:320:76]
assign _prior_out_T_3 = _GEN_13; // @[MSHR.scala:320:76]
wire [2:0] _after_out_T_4 = after_c ? _after_out_T_2 : _after_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _GEN_14 = final_meta_writeback_state == 2'h0; // @[MSHR.scala:215:38, :317:26]
wire _after_T_3; // @[MSHR.scala:317:26]
assign _after_T_3 = _GEN_14; // @[MSHR.scala:317:26]
wire _prior_T_3; // @[MSHR.scala:317:26]
assign _prior_T_3 = _GEN_14; // @[MSHR.scala:317:26]
assign after = _after_T ? {3'h0, _after_out_T} : _after_T_1 ? {2'h0, _after_out_T_1} : _after_T_2 ? {1'h0, _after_out_T_4} : {_after_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26]
wire probe_bit = io_sinkc_bits_source_0 == 6'h21; // @[Parameters.scala:46:9]
wire _GEN_15 = probes_done | probe_bit; // @[Parameters.scala:46:9]
wire _last_probe_T; // @[MSHR.scala:459:33]
assign _last_probe_T = _GEN_15; // @[MSHR.scala:459:33]
wire _probes_done_T; // @[MSHR.scala:467:32]
assign _probes_done_T = _GEN_15; // @[MSHR.scala:459:33, :467:32]
wire _last_probe_T_1 = ~excluded_client; // @[MSHR.scala:279:28, :289:53, :459:66]
wire _last_probe_T_2 = meta_clients & _last_probe_T_1; // @[MSHR.scala:100:17, :459:{64,66}]
wire last_probe = _last_probe_T == _last_probe_T_2; // @[MSHR.scala:459:{33,46,64}]
wire _probe_toN_T = io_sinkc_bits_param_0 == 3'h1; // @[Parameters.scala:282:11]
wire _probe_toN_T_1 = io_sinkc_bits_param_0 == 3'h2; // @[Parameters.scala:282:43]
wire _probe_toN_T_2 = _probe_toN_T | _probe_toN_T_1; // @[Parameters.scala:282:{11,34,43}]
wire _probe_toN_T_3 = io_sinkc_bits_param_0 == 3'h5; // @[Parameters.scala:282:75]
wire probe_toN = _probe_toN_T_2 | _probe_toN_T_3; // @[Parameters.scala:282:{34,66,75}]
wire _probes_toN_T = probe_toN & probe_bit; // @[Parameters.scala:46:9]
wire _probes_toN_T_1 = probes_toN | _probes_toN_T; // @[MSHR.scala:151:23, :468:{30,35}]
wire _probes_noT_T = io_sinkc_bits_param_0 != 3'h3; // @[MSHR.scala:84:7, :469:53]
wire _probes_noT_T_1 = probes_noT | _probes_noT_T; // @[MSHR.scala:152:23, :469:{30,53}]
wire _w_rprobeackfirst_T = w_rprobeackfirst | last_probe; // @[MSHR.scala:122:33, :459:46, :470:42]
wire _GEN_16 = last_probe & io_sinkc_bits_last_0; // @[MSHR.scala:84:7, :459:46, :471:55]
wire _w_rprobeacklast_T; // @[MSHR.scala:471:55]
assign _w_rprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55]
wire _w_pprobeacklast_T; // @[MSHR.scala:473:55]
assign _w_pprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55, :473:55]
wire _w_rprobeacklast_T_1 = w_rprobeacklast | _w_rprobeacklast_T; // @[MSHR.scala:123:33, :471:{40,55}]
wire _w_pprobeackfirst_T = w_pprobeackfirst | last_probe; // @[MSHR.scala:132:33, :459:46, :472:42]
wire _w_pprobeacklast_T_1 = w_pprobeacklast | _w_pprobeacklast_T; // @[MSHR.scala:133:33, :473:{40,55}]
wire _set_pprobeack_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77]
wire _set_pprobeack_T_1 = io_sinkc_bits_last_0 | _set_pprobeack_T; // @[MSHR.scala:84:7, :475:{59,77}]
wire set_pprobeack = last_probe & _set_pprobeack_T_1; // @[MSHR.scala:459:46, :475:{36,59}]
wire _w_pprobeack_T = w_pprobeack | set_pprobeack; // @[MSHR.scala:134:33, :475:36, :476:32]
wire _w_grant_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77, :490:33]
wire _w_grant_T_1 = _w_grant_T | io_sinkd_bits_last_0; // @[MSHR.scala:84:7, :490:{33,41}]
wire _gotT_T = io_sinkd_bits_param_0 == 3'h0; // @[MSHR.scala:84:7, :493:35]
wire _new_meta_T = io_allocate_valid_0 & io_allocate_bits_repeat_0; // @[MSHR.scala:84:7, :505:40]
wire new_meta_dirty = _new_meta_T ? final_meta_writeback_dirty : io_directory_bits_dirty_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [1:0] new_meta_state = _new_meta_T ? final_meta_writeback_state : io_directory_bits_state_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_meta_clients = _new_meta_T ? final_meta_writeback_clients : io_directory_bits_clients_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [12:0] new_meta_tag = _new_meta_T ? final_meta_writeback_tag : io_directory_bits_tag_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_meta_hit = _new_meta_T ? final_meta_writeback_hit : io_directory_bits_hit_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [2:0] new_meta_way = _new_meta_T ? final_meta_writeback_way : io_directory_bits_way_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_request_prio_0 = io_allocate_valid_0 ? allocate_as_full_prio_0 : request_prio_0; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_prio_1 = io_allocate_valid_0 ? allocate_as_full_prio_1 : request_prio_1; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_prio_2 = io_allocate_valid_0 ? allocate_as_full_prio_2 : request_prio_2; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_control = io_allocate_valid_0 ? allocate_as_full_control : request_control; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_opcode = io_allocate_valid_0 ? allocate_as_full_opcode : request_opcode; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_param = io_allocate_valid_0 ? allocate_as_full_param : request_param; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_size = io_allocate_valid_0 ? allocate_as_full_size : request_size; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_source = io_allocate_valid_0 ? allocate_as_full_source : request_source; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [12:0] new_request_tag = io_allocate_valid_0 ? allocate_as_full_tag : request_tag; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_offset = io_allocate_valid_0 ? allocate_as_full_offset : request_offset; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_put = io_allocate_valid_0 ? allocate_as_full_put : request_put; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [9:0] new_request_set = io_allocate_valid_0 ? allocate_as_full_set : request_set; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire _new_needT_T = new_request_opcode[2]; // @[Parameters.scala:269:12]
wire _new_needT_T_1 = ~_new_needT_T; // @[Parameters.scala:269:{5,12}]
wire _GEN_17 = new_request_opcode == 3'h5; // @[Parameters.scala:270:13]
wire _new_needT_T_2; // @[Parameters.scala:270:13]
assign _new_needT_T_2 = _GEN_17; // @[Parameters.scala:270:13]
wire _new_skipProbe_T_5; // @[Parameters.scala:279:117]
assign _new_skipProbe_T_5 = _GEN_17; // @[Parameters.scala:270:13, :279:117]
wire _new_needT_T_3 = new_request_param == 3'h1; // @[Parameters.scala:270:42]
wire _new_needT_T_4 = _new_needT_T_2 & _new_needT_T_3; // @[Parameters.scala:270:{13,33,42}]
wire _new_needT_T_5 = _new_needT_T_1 | _new_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33]
wire _T_615 = new_request_opcode == 3'h6; // @[Parameters.scala:271:14]
wire _new_needT_T_6; // @[Parameters.scala:271:14]
assign _new_needT_T_6 = _T_615; // @[Parameters.scala:271:14]
wire _new_skipProbe_T; // @[Parameters.scala:279:12]
assign _new_skipProbe_T = _T_615; // @[Parameters.scala:271:14, :279:12]
wire _new_needT_T_7 = &new_request_opcode; // @[Parameters.scala:271:52]
wire _new_needT_T_8 = _new_needT_T_6 | _new_needT_T_7; // @[Parameters.scala:271:{14,42,52}]
wire _new_needT_T_9 = |new_request_param; // @[Parameters.scala:271:89]
wire _new_needT_T_10 = _new_needT_T_8 & _new_needT_T_9; // @[Parameters.scala:271:{42,80,89}]
wire new_needT = _new_needT_T_5 | _new_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80]
wire new_clientBit = new_request_source == 6'h21; // @[Parameters.scala:46:9]
wire _new_skipProbe_T_1 = &new_request_opcode; // @[Parameters.scala:271:52, :279:50]
wire _new_skipProbe_T_2 = _new_skipProbe_T | _new_skipProbe_T_1; // @[Parameters.scala:279:{12,40,50}]
wire _new_skipProbe_T_3 = new_request_opcode == 3'h4; // @[Parameters.scala:279:87]
wire _new_skipProbe_T_4 = _new_skipProbe_T_2 | _new_skipProbe_T_3; // @[Parameters.scala:279:{40,77,87}]
wire _new_skipProbe_T_7 = _new_skipProbe_T_4; // @[Parameters.scala:279:{77,106}]
wire new_skipProbe = _new_skipProbe_T_7 & new_clientBit; // @[Parameters.scala:46:9]
wire [3:0] prior; // @[MSHR.scala:314:26]
wire _prior_out_T = ~prior_c; // @[MSHR.scala:315:27, :318:32]
wire _prior_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26]
wire [2:0] _prior_out_T_4 = prior_c ? _prior_out_T_2 : _prior_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
assign prior = _prior_T ? {3'h0, _prior_out_T} : _prior_T_1 ? {2'h0, _prior_out_T_1} : _prior_T_2 ? {1'h0, _prior_out_T_4} : {_prior_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26]
wire _T_574 = io_directory_valid_0 | _new_meta_T; // @[MSHR.scala:84:7, :505:40, :539:28] |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
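// Editorial note (not part of the original source): TLMonitor.apply is the usual entry point; when
// `enable` is true it rewrites the connection under EnableMonitors so traffic flows through an
// ephemeral "monitor" node, and when false it returns the node unchanged.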
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
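  // Editorial note (not part of the original source): when monitorDir == MonitorDirection.Monitor,
  // both monAssert and assume lower to plain chisel3 assertions; for other directions the property
  // direction is flipped for assume, so the same check can serve as an assertion on one side of a
  // formal boundary and as an assumption on the other.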
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
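// Note (editorial): visible() is vacuously true for clients that do not own the source ID; for the
// owning client the address must fall within at least one of its declared visibility regions.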
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
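// Worked example (editorial note, not part of the original monitor): with sizeBits = 3 the packed
// per-source field is a_size_bus_size = 4 bits wide, so a size-5 request from source 2 is recorded
// as ((5 << 1) | 1) = 11 at bit offset (2 << log_a_size_bus_size) = 8 of inflight_sizes; the
// a_size_lookup above shifts right by that offset, masks 4 bits, and drops the low "set" bit to
// recover the size 5 for comparison against the D-channel response.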
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
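// Usage sketch (editorial illustration, not part of the original file): DecoupledHelper gathers a
// set of ready/valid-style conditions and lets each driven signal exclude itself, e.g.
//   val helper = DecoupledHelper(in.valid, out.ready, bufferFree)
//   out.valid := helper.fire(out.ready)  // all conditions except out.ready itself
//   in.ready  := helper.fire(in.valid)   // all conditions except in.valid
// where in, out and bufferFree are hypothetical signals in the enclosing module.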
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
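// Usage sketch (editorial illustration with hypothetical values): MuxTLookup selects a tuple by
// key, falling back to the default when no entry matches, e.g.
//   val (wen, wdata) = MuxTLookup(addr, (false.B, 0.U), Seq(
//     0x10.U -> (true.B, 1.U),
//     0x14.U -> (true.B, 2.U)))
// where addr is a hypothetical UInt selector in the enclosing module.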
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
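// Usage sketch (editorial illustration): Str packs characters into a UInt for printf-style debug
// output and renders numbers as ASCII, e.g.
//   Str("AB")         // 16-bit UInt holding 0x4142
//   Str(counter, 16)  // hexadecimal ASCII rendering of a hypothetical UInt counter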
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
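// Note (editorial): for a non-power-of-2 mod, partition() compares an oversampled LFSR value
// against mod evenly spaced thresholds and the PriorityEncoder picks the first bucket whose upper
// bound exceeds the value, giving an approximately uniform result in [0, mod).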
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
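// Usage note (editorial): Majority(Seq(a, b, c)) is true when at least two of the three
// (hypothetical) Bools are set; the implementation ORs the AND of every (in.size/2 + 1)-element
// subset, which stays small for typical voter widths.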
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
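// Note (editorial): two(x) returns the pair ("at least one bit set", "at least two bits set")
// via a divide-and-conquer OR/AND tree, so the common n <= 2 cases avoid a full PopCount adder.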
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
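// Worked example (editorial note, using the convention that bit i of the returned mask is byte
// lane i): MaskGen(addr_lo = 4.U, lgSize = 2.U, beatBytes = 8) marks the upper four lanes of an
// 8-byte beat, i.e. 0xf0 (1111_0000), since a 4-byte access at offset 4 touches lanes 4 through 7.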
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
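// Usage sketch (editorial illustration): a simulation-only knob and a timeout could be wired up as
//   val verbose = PlusArg("monitor_verbose", default = 0, docstring = "print extra monitor info")
//   PlusArg.timeout("max_cycles", docstring = "kill sim after this many cycles")(cycleCount)
// where cycleCount is a hypothetical UInt counter in the enclosing module; both calls also
// register the arg with PlusArgArtefacts so it appears in the generated help text.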
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
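    // Illustrative sketch (not part of the original source): this lets a Seq of hardware
    // elements be indexed by a UInt much like a Vec. The registers and selector below are
    // hypothetical.
    //   val regs = Seq.fill(3)(RegInit(0.U(8.W)))  // non-power-of-2 length
    //   val sel  = Wire(UInt(2.W))
    //   val out  = regs(sel)                       // reflected decode, never selects past the end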
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
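    // Illustrative sketch (not part of the original source): wrapping pointer arithmetic
    // for a 3-entry structure, assuming both operands are already < 3.
    //   val ptr = RegInit(0.U(2.W))
    //   ptr := ptr.addWrap(1.U, 3)      // 0 -> 1 -> 2 -> 0 -> ...
    //   val prev = ptr.subWrap(1.U, 3)  // 0 -> 2, 1 -> 0, 2 -> 1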
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
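  // Illustrative values (not part of the original source), for 4-bit operands:
  //   UIntToOH1(2.U, 4)       // => b0011 (the mask 2^2 - 1)
  //   OH1ToOH("b0011".U)      // => b0100
  //   OH1ToUInt("b0011".U)    // => 2.U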
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
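  // Illustrative values (not part of the original source), for a 4-bit input:
  //   leftOR("b0100".U(4.W))   // => b1100 (set bits propagate toward the MSB)
  //   rightOR("b0100".U(4.W))  // => b0111 (set bits propagate toward the LSB)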
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
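  // Illustrative values (not part of the original source): keys appear in first-seen
  // order, so repeated elaborations produce identical generated hardware.
  //   groupByIntoSeq(Seq(3, 1, 4, 1, 5))(_ % 2)
  //   // => Seq(1 -> Seq(3, 1, 1, 5), 0 -> Seq(4))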
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
/**
* The three primary TileLink permissions are:
 * (T)runk: the agent is (or is on an inwards path to) the global point of serialization.
 * (B)ranch: the agent is on an outwards path from the Trunk and holds, at most, a read-only copy.
 * (N)one: the agent holds no permissions on (and no valid copy of) the block.
* These permissions are permuted by transfer operations in various ways.
 * Operations can cap permissions, request that they be grown or shrunk,
 * or ask for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
  // Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
  def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
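// Illustrative sketch (not part of the original source): a client holding (N)one that
// wants to write requests Grow NtoT with an AcquireBlock and, when later probed down to
// (B)ranch, answers with Shrink TtoB. The edge, source, address, and size values below
// are hypothetical.
//   val (legalAcq, acq) = edgeOut.AcquireBlock(srcId, addr, lgSize, TLPermissions.NtoT)
//   val probeAck         = edgeOut.ProbeAck(probeReq, TLPermissions.TtoB)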
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A potentially empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
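// Illustrative values (not part of the original source):
//   IdRange(4, 8).contains(5)                // true  (half-open: 4 <= 5 < 8)
//   IdRange(4, 8).contains(8)                // false (end is exclusive)
//   IdRange(4, 8).overlaps(IdRange(7, 12))   // true  (both contain id 7)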
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
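// Illustrative values (not part of the original source):
//   TransferSizes(4, 64).contains(16)                         // true: power of 2 within [4, 64]
//   TransferSizes(4, 64).intersect(TransferSizes(32, 256))    // TransferSizes(32, 64)
//   TransferSizes(4, 64).mincover(TransferSizes(1, 2))        // TransferSizes(1, 64)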
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
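// Illustrative values (not part of the original source):
//   AddressSet(0x1000, 0xff).contains(0x10a0)   // true: the set covers 0x1000-0x10ff
//   AddressSet.misaligned(0x0, 0x3000)
//   // => Seq(AddressSet(0x0, 0x1fff), AddressSet(0x2000, 0xfff))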
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
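// Illustrative sketch (not part of the original source): the implicit Int conversion lets
// a bare depth stand in for BufferParams; applying it to a Decoupled inserts a Queue.
// The upstream stream below is hypothetical.
//   val buffered = BufferParams.default(upstream)   // two-entry Queue, no flow/pipe
//   val same     = BufferParams.none(upstream)      // depth 0: passed through unchanged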
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
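  // Illustrative sketch (not part of the original source): tracking beat boundaries on a
  // TileLink A channel with these helpers. The edge and channel names are hypothetical.
  //   val (a_first, a_last, a_done) = edge.firstlast(tl.a)
  //   when (tl.a.fire && a_first) { /* capture per-burst state */ }
  //   when (a_done)               { /* the final beat was accepted */ }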
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
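// AccessAckData: data-carrying acknowledgement, used to answer Get and AMO requests.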
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
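// HintAck completes a Hint; it carries no data and only reports `denied`.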
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
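// Illustrative sketch only (assumed manager-side context, not part of the file above):
// with an inward edge `edgeIn` and an accepted request `a = in.a.bits`, a response
// could be built from these helpers as, e.g.,
//   in.d.bits := Mux(a.opcode === TLMessages.Get,
//                    edgeIn.AccessAck(a, readData), // AccessAckData with payload (`readData` is a placeholder)
//                    edgeIn.AccessAck(a))           // dataless AccessAck for Puts
// Arbitration and ready/valid handshaking are omitted here.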
module TLMonitor_65( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [20:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [7:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [20:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [7:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
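  // The monitored edge has no C channel, so all C-channel bookkeeping signals
  // below are tied off to constant zero by the generator.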
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_29 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_31 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_35 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_37 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_53 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_55 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_59 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_61 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_65 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_67 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_71 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_73 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_79 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_81 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_85 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_87 = 1'h1; // @[Parameters.scala:57:20]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_first_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_first_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_wo_ready_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_wo_ready_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_4_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_5_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_first_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_first_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_first_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_first_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_set_wo_ready_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_set_wo_ready_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_opcodes_set_interm_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_opcodes_set_interm_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_sizes_set_interm_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_sizes_set_interm_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_opcodes_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_opcodes_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_sizes_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_sizes_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_probe_ack_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_probe_ack_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_probe_ack_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_probe_ack_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _same_cycle_resp_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _same_cycle_resp_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _same_cycle_resp_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _same_cycle_resp_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _same_cycle_resp_WIRE_4_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _same_cycle_resp_WIRE_5_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [2050:0] _c_opcodes_set_T_1 = 2051'h0; // @[Monitor.scala:767:54]
wire [2050:0] _c_sizes_set_T_1 = 2051'h0; // @[Monitor.scala:768:52]
wire [10:0] _c_opcodes_set_T = 11'h0; // @[Monitor.scala:767:79]
wire [10:0] _c_sizes_set_T = 11'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [255:0] _c_set_wo_ready_T = 256'h1; // @[OneHot.scala:58:35]
wire [255:0] _c_set_T = 256'h1; // @[OneHot.scala:58:35]
wire [515:0] c_opcodes_set = 516'h0; // @[Monitor.scala:740:34]
wire [515:0] c_sizes_set = 516'h0; // @[Monitor.scala:741:34]
wire [128:0] c_set = 129'h0; // @[Monitor.scala:738:34]
wire [128:0] c_set_wo_ready = 129'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [7:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_44 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_48 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_54 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_55 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_56 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_57 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_58 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_59 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_60 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_61 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_62 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_63 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_64 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_65 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_8 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_9 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_10 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_11 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_T = io_in_a_bits_source_0 == 8'h50; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [5:0] _source_ok_T_1 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_7 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_13 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_19 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_2 = _source_ok_T_1 == 6'h10; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_8 = _source_ok_T_7 == 6'h11; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_14 = _source_ok_T_13 == 6'h12; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_20 = _source_ok_T_19 == 6'h13; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31]
wire _source_ok_T_25 = io_in_a_bits_source_0 == 8'h20; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_5 = _source_ok_T_25; // @[Parameters.scala:1138:31]
wire _source_ok_T_26 = io_in_a_bits_source_0 == 8'h21; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_6 = _source_ok_T_26; // @[Parameters.scala:1138:31]
wire [3:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] _source_ok_T_27 = io_in_a_bits_source_0[7:4]; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_T_33 = io_in_a_bits_source_0[7:4]; // @[Monitor.scala:36:7]
wire _source_ok_T_28 = _source_ok_T_27 == 4'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_30 = _source_ok_T_28; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_32 = _source_ok_T_30; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_7 = _source_ok_T_32; // @[Parameters.scala:1138:31]
wire [3:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[3:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_34 = _source_ok_T_33 == 4'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_36 = _source_ok_T_34; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_38 = _source_ok_T_36; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_8 = _source_ok_T_38; // @[Parameters.scala:1138:31]
wire _source_ok_T_39 = io_in_a_bits_source_0 == 8'h22; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_9 = _source_ok_T_39; // @[Parameters.scala:1138:31]
wire _source_ok_T_40 = io_in_a_bits_source_0 == 8'h80; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_10 = _source_ok_T_40; // @[Parameters.scala:1138:31]
wire _source_ok_T_41 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_42 = _source_ok_T_41 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_43 = _source_ok_T_42 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_44 = _source_ok_T_43 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_45 = _source_ok_T_44 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_46 = _source_ok_T_45 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_47 = _source_ok_T_46 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_48 = _source_ok_T_47 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_49 = _source_ok_T_48 | _source_ok_WIRE_9; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_49 | _source_ok_WIRE_10; // @[Parameters.scala:1138:31, :1139:46]
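  // Alignment check: the address must be aligned to the transfer size (its low `size` bits are zero).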
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [20:0] _is_aligned_T = {15'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 21'h0; // @[Edges.scala:21:{16,24}]
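  // Recompute the byte-lane mask implied by address and size; the monitor checks a.mask against it.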
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_4 = _uncommonBits_T_4[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_5 = _uncommonBits_T_5[3:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_10 = _uncommonBits_T_10[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_11 = _uncommonBits_T_11[3:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_16 = _uncommonBits_T_16[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_17 = _uncommonBits_T_17[3:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_22 = _uncommonBits_T_22[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_23 = _uncommonBits_T_23[3:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_28 = _uncommonBits_T_28[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_29 = _uncommonBits_T_29[3:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_34 = _uncommonBits_T_34[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_35 = _uncommonBits_T_35[3:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_39 = _uncommonBits_T_39[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_40 = _uncommonBits_T_40[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_41 = _uncommonBits_T_41[3:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_44 = _uncommonBits_T_44[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_46 = _uncommonBits_T_46[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_47 = _uncommonBits_T_47[3:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_48 = _uncommonBits_T_48[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_49 = _uncommonBits_T_49[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_52 = _uncommonBits_T_52[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_53 = _uncommonBits_T_53[3:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_54 = _uncommonBits_T_54[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_55 = _uncommonBits_T_55[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_56 = _uncommonBits_T_56[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_57 = _uncommonBits_T_57[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_58 = _uncommonBits_T_58[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_59 = _uncommonBits_T_59[3:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_60 = _uncommonBits_T_60[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_61 = _uncommonBits_T_61[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_62 = _uncommonBits_T_62[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_63 = _uncommonBits_T_63[1:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_64 = _uncommonBits_T_64[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_65 = _uncommonBits_T_65[3:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_50 = io_in_d_bits_source_0 == 8'h50; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_50; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire [5:0] _source_ok_T_51 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_57 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_63 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_69 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_52 = _source_ok_T_51 == 6'h10; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_54 = _source_ok_T_52; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_56 = _source_ok_T_54; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_56; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_58 = _source_ok_T_57 == 6'h11; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_60 = _source_ok_T_58; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_62 = _source_ok_T_60; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_62; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_64 = _source_ok_T_63 == 6'h12; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_66 = _source_ok_T_64; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_68 = _source_ok_T_66; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_68; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_70 = _source_ok_T_69 == 6'h13; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_72 = _source_ok_T_70; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_74 = _source_ok_T_72; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_4 = _source_ok_T_74; // @[Parameters.scala:1138:31]
wire _source_ok_T_75 = io_in_d_bits_source_0 == 8'h20; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_5 = _source_ok_T_75; // @[Parameters.scala:1138:31]
wire _source_ok_T_76 = io_in_d_bits_source_0 == 8'h21; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_6 = _source_ok_T_76; // @[Parameters.scala:1138:31]
wire [3:0] source_ok_uncommonBits_10 = _source_ok_uncommonBits_T_10[3:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] _source_ok_T_77 = io_in_d_bits_source_0[7:4]; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_T_83 = io_in_d_bits_source_0[7:4]; // @[Monitor.scala:36:7]
wire _source_ok_T_78 = _source_ok_T_77 == 4'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_80 = _source_ok_T_78; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_82 = _source_ok_T_80; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_7 = _source_ok_T_82; // @[Parameters.scala:1138:31]
wire [3:0] source_ok_uncommonBits_11 = _source_ok_uncommonBits_T_11[3:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_84 = _source_ok_T_83 == 4'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_86 = _source_ok_T_84; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_88 = _source_ok_T_86; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_8 = _source_ok_T_88; // @[Parameters.scala:1138:31]
wire _source_ok_T_89 = io_in_d_bits_source_0 == 8'h22; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_9 = _source_ok_T_89; // @[Parameters.scala:1138:31]
wire _source_ok_T_90 = io_in_d_bits_source_0 == 8'h80; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_10 = _source_ok_T_90; // @[Parameters.scala:1138:31]
wire _source_ok_T_91 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_92 = _source_ok_T_91 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_93 = _source_ok_T_92 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_94 = _source_ok_T_93 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_95 = _source_ok_T_94 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_96 = _source_ok_T_95 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_97 = _source_ok_T_96 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_98 = _source_ok_T_97 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_99 = _source_ok_T_98 | _source_ok_WIRE_1_9; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_99 | _source_ok_WIRE_1_10; // @[Parameters.scala:1138:31, :1139:46]
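  // Track the first beat of each A-channel burst (beat counter counts down to zero).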
wire _T_1266 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1266; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1266; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [7:0] source; // @[Monitor.scala:390:22]
reg [20:0] address; // @[Monitor.scala:391:22]
wire _T_1334 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1334; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1334; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1334; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [7:0] source_1; // @[Monitor.scala:541:22]
reg [128:0] inflight; // @[Monitor.scala:614:27]
reg [515:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [515:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [128:0] a_set; // @[Monitor.scala:626:34]
wire [128:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [515:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [515:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [10:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [10:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [10:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [10:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [10:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [10:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [10:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [10:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [10:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [515:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [515:0] _a_opcode_lookup_T_6 = {512'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [515:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[515:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [515:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [515:0] _a_size_lookup_T_6 = {512'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [515:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[515:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [255:0] _GEN_2 = 256'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [255:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [255:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire _T_1199 = _T_1266 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_1199 ? _a_set_T[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_1199 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_1199 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [10:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [10:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [10:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [2050:0] _a_opcodes_set_T_1 = {2047'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_1199 ? _a_opcodes_set_T_1[515:0] : 516'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [2050:0] _a_sizes_set_T_1 = {2047'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_1199 ? _a_sizes_set_T_1[515:0] : 516'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [128:0] d_clr; // @[Monitor.scala:664:34]
wire [128:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [515:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [515:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_1245 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [255:0] _GEN_5 = 256'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1245 & ~d_release_ack ? _d_clr_wo_ready_T[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire _T_1214 = _T_1334 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_1214 ? _d_clr_T[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire [2062:0] _d_opcodes_clr_T_5 = 2063'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_1214 ? _d_opcodes_clr_T_5[515:0] : 516'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [2062:0] _d_sizes_clr_T_5 = 2063'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_1214 ? _d_sizes_clr_T_5[515:0] : 516'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [128:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [128:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [128:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [515:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [515:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [515:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [515:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [515:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [515:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [128:0] inflight_1; // @[Monitor.scala:726:35]
wire [128:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [515:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [515:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [515:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [515:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [515:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [515:0] _c_opcode_lookup_T_6 = {512'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [515:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[515:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [515:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [515:0] _c_size_lookup_T_6 = {512'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [515:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[515:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [128:0] d_clr_1; // @[Monitor.scala:774:34]
wire [128:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [515:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [515:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1310 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1310 & d_release_ack_1 ? _d_clr_wo_ready_T_1[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire _T_1292 = _T_1334 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1292 ? _d_clr_T_1[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire [2062:0] _d_opcodes_clr_T_11 = 2063'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1292 ? _d_opcodes_clr_T_11[515:0] : 516'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [2062:0] _d_sizes_clr_T_11 = 2063'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1292 ? _d_sizes_clr_T_11[515:0] : 516'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 8'h0; // @[Monitor.scala:36:7, :795:113]
wire [128:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [128:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [515:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [515:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [515:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [515:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File util.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v3.common.{MicroOp}
import boom.v3.exu.{BrUpdateInfo}
/**
 * Object to XOR fold an input register of fullLength into a compressedLength.
*/
object Fold
{
def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
val clen = compressedLength
val hlen = fullLength
if (hlen <= clen) {
input
} else {
var res = 0.U(clen.W)
var remaining = input.asUInt
for (i <- 0 to hlen-1 by clen) {
val len = if (i + clen > hlen ) (hlen - i) else clen
require(len > 0)
res = res(clen-1,0) ^ remaining(len-1,0)
remaining = remaining >> len.U
}
res
}
}
}
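// Illustrative worked example (not part of the original BOOM source): folding an
// 8-bit value down to 3 bits XORs the successive 3-bit slices together.
//   Fold("b10110101".U(8.W), 3, 8) // = b101 ^ b110 ^ b10 = b001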
/**
* Object to check if MicroOp was killed due to a branch mispredict.
* Uses "Fast" branch masks
*/
object IsKilledByBranch
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop.br_mask)
}
def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop_mask)
}
}
/**
* Object to return new MicroOp with a new BR mask given a MicroOp mask
* and old BR mask.
*/
object GetNewUopAndBrMask
{
def apply(uop: MicroOp, brupdate: BrUpdateInfo)
(implicit p: Parameters): MicroOp = {
val newuop = WireInit(uop)
newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
newuop
}
}
/**
* Object to return a BR mask given a MicroOp mask and old BR mask.
*/
object GetNewBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = {
return uop.br_mask & ~brupdate.b1.resolve_mask
}
def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = {
return br_mask & ~brupdate.b1.resolve_mask
}
}
object UpdateBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.br_mask := GetNewBrMask(brupdate, uop)
out
}
def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
val out = WireInit(bundle)
out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
out
}
def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = {
val out = WireInit(bundle)
out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
out.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask)
out
}
}
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
def apply(reg_val: UInt, new_bit: Bool): UInt = {
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
val carry = csr(clen-1)
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapAdd
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, amt: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + amt)(log2Ceil(n)-1,0)
} else {
val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
Mux(sum >= n.U,
sum - n.U,
sum)
}
}
}
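// Illustrative examples (not part of the original BOOM source):
//   WrapAdd(5.U, 4.U, 8) // = 1.U (power-of-two n simply truncates: (5+4) mod 8)
//   WrapAdd(5.U, 4.U, 7) // = 2.U (non-power-of-two n subtracts n once on overflow)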
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapSub
{
// "n" is the number of increments, so we wrap to n-1.
def apply(value: UInt, amt: Int, n: Int): UInt = {
if (isPow2(n)) {
(value - amt.U)(log2Ceil(n)-1,0)
} else {
val v = Cat(0.U(1.W), value)
val b = Cat(0.U(1.W), amt.U)
Mux(value >= amt.U,
value - amt.U,
n.U - amt.U + value)
}
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === (n-1).U)
Mux(wrap, 0.U, value + 1.U)
}
}
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value - 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === 0.U)
Mux(wrap, (n-1).U, value - 1.U)
}
}
}
/**
* Object to mask off lower bits of a PC to align to a "b"
* Byte boundary.
*/
object AlignPCToBoundary
{
def apply(pc: UInt, b: Int): UInt = {
    // Invert for the scenario where pc is wider than b
    // (masking directly would otherwise clear all bits above size(b)).
~(~pc | (b-1).U)
}
}
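// Illustrative example (not part of the original BOOM source): aligning down to an
// 8-byte boundary clears the low three bits while preserving the upper PC bits.
//   AlignPCToBoundary("h1237".U(16.W), 8) // = "h1230".U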
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
def apply(signal: UInt): UInt = {
val w = signal.getWidth
val out = Cat(signal(w-2,0), signal(w-1))
return out
}
}
/**
* Object to sext a value to a particular length.
*/
object Sext
{
def apply(x: UInt, length: Int): UInt = {
if (x.getWidth == length) return x
else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x)
}
}
/**
* Object to translate from BOOM's special "packed immediate" to a 32b signed immediate
* Asking for U-type gives it shifted up 12 bits.
*/
object ImmGen
{
import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U}
def apply(ip: UInt, isel: UInt): SInt = {
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt
}
}
/**
* Object to get the FP rounding mode out of a packed immediate.
*/
object ImmGenRm { def apply(ip: UInt): UInt = { return ip(2,0) } }
/**
 * Object to get the FP function type from a packed immediate.
* Note: only works if !(IS_B or IS_S)
*/
object ImmGenTyp { def apply(ip: UInt): UInt = { return ip(9,8) } }
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
 * for a debug assert (nowhere else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
val is_br = (inst(6,0) === "b1100011".U)
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
* Object to return the lowest bit position after the head.
*/
object AgePriorityEncoder
{
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
}
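// Illustrative examples (not part of the original BOOM source): the first valid
// entry at or after the head wins; otherwise the search wraps below the head.
//   AgePriorityEncoder(Seq(true.B, false.B, true.B, true.B), 2.U)   // = 2.U
//   AgePriorityEncoder(Seq(true.B, false.B, false.B, false.B), 2.U) // = 0.U (wraps)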
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
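// Illustrative example (not part of the original BOOM source): with the queue head
// at index 5, physical index 6 was allocated before index 2, so
//   IsOlder(6.U, 2.U, 5.U) // = true.B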
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => in >> i.U).reduce(_|_)
}
}
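// Illustrative example (not part of the original BOOM source):
//   MaskLower("b00100".U(5.W)) // = "b00111".U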
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_)
}
}
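// Illustrative example (not part of the original BOOM source):
//   MaskUpper("b00100".U(5.W)) // = "b11100".U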
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
val n = in(0).size
VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
}
}
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
var mask = in
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
}
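// Illustrative example (not part of the original BOOM source): selecting the first
// two set bits of "b01101" as one-hots, lowest first.
//   SelectFirstN("b01101".U(5.W), 2) // sels(0) = "b00001".U, sels(1) = "b00100".U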
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
io.out <> io.in
} else {
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
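// Illustrative behavior (not part of the original BOOM source): with n = 3, k = 2
// and io.in valids (1, 0, 1), out(0) is driven from in(0) and out(1) from in(2);
// each in(i).ready reflects the readiness of the output slot it would occupy.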
/**
* Create a queue that can be killed with a branch kill signal.
* Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq).
*/
class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v3.common.BoomModule()(p)
with boom.v3.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireInit(io.enq.fire)
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop)
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop))
io.deq.bits := out
io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, out.uop)
// For flow queue behavior.
if (flow) {
when (io.empty) {
io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop)
io.deq.bits := io.enq.bits
io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
do_deq := false.B
when (io.deq.ready) { do_enq := false.B }
}
}
private val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
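// Illustrative usage sketch (hypothetical producer/consumer and payload bundle,
// not from the original BOOM source); any gen with a MicroOp field (HasBoomUOP) works:
//   val q = Module(new BranchKillableQueue(new SomeUopBundle, entries = 8))
//   q.io.enq <> producer          // enq.valid assumed not already branch-killed
//   q.io.brupdate := io.brupdate  // kills in-flight entries on a mispredict
//   q.io.flush := io.flush
//   consumer <> q.io.deq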
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
/**
* Take in a Chisel Bool and convert it into a Str
* based on the Chars given
*
* @param c_bool Chisel Bool
* @param trueChar Scala Char if bool is true
* @param falseChar Scala Char if bool is false
* @return UInt ASCII Char for "trueChar" or "falseChar"
*/
def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
Mux(c_bool, Str(trueChar), Str(falseChar))
}
}
object CfiTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param cfi_type specific cfi type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(cfi_type: UInt) = {
val strings = Seq("----", "BR ", "JAL ", "JALR")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(cfi_type)
}
}
object BpdTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(bpd_type)
}
}
object RobTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(rob_type)
}
}
object XRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(xreg)
}
}
object FPRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(fpreg)
}
}
object BoomCoreStringPrefix
{
/**
* Add prefix to BOOM strings (currently only adds the hartId)
*
* @param strs list of strings
* @return String combining the list with the prefix per line
*/
def apply(strs: String*)(implicit p: Parameters) = {
val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
strs.map(str => prefix + str + "\n").mkString("")
}
}
File consts.scala:
//******************************************************************************
// Copyright (c) 2011 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// RISCV Processor Constants
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.common.constants
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util.Str
import freechips.rocketchip.rocket.RVCExpander
/**
* Mixin for issue queue types
*/
trait IQType
{
val IQT_SZ = 3
val IQT_INT = 1.U(IQT_SZ.W)
val IQT_MEM = 2.U(IQT_SZ.W)
val IQT_FP = 4.U(IQT_SZ.W)
val IQT_MFP = 6.U(IQT_SZ.W)
}
/**
* Mixin for scalar operation constants
*/
trait ScalarOpConstants
{
val X = BitPat("b?")
val Y = BitPat("b1")
val N = BitPat("b0")
//************************************
// Extra Constants
// Which branch predictor predicted us
val BSRC_SZ = 2
val BSRC_1 = 0.U(BSRC_SZ.W) // 1-cycle branch pred
val BSRC_2 = 1.U(BSRC_SZ.W) // 2-cycle branch pred
val BSRC_3 = 2.U(BSRC_SZ.W) // 3-cycle branch pred
val BSRC_C = 3.U(BSRC_SZ.W) // core branch resolution
//************************************
// Control Signals
// CFI types
val CFI_SZ = 3
val CFI_X = 0.U(CFI_SZ.W) // Not a CFI instruction
val CFI_BR = 1.U(CFI_SZ.W) // Branch
val CFI_JAL = 2.U(CFI_SZ.W) // JAL
val CFI_JALR = 3.U(CFI_SZ.W) // JALR
// PC Select Signal
val PC_PLUS4 = 0.U(2.W) // PC + 4
val PC_BRJMP = 1.U(2.W) // brjmp_target
val PC_JALR = 2.U(2.W) // jump_reg_target
// Branch Type
val BR_N = 0.U(4.W) // Next
val BR_NE = 1.U(4.W) // Branch on NotEqual
val BR_EQ = 2.U(4.W) // Branch on Equal
val BR_GE = 3.U(4.W) // Branch on Greater/Equal
val BR_GEU = 4.U(4.W) // Branch on Greater/Equal Unsigned
val BR_LT = 5.U(4.W) // Branch on Less Than
val BR_LTU = 6.U(4.W) // Branch on Less Than Unsigned
val BR_J = 7.U(4.W) // Jump
val BR_JR = 8.U(4.W) // Jump Register
// RS1 Operand Select Signal
val OP1_RS1 = 0.U(2.W) // Register Source #1
val OP1_ZERO= 1.U(2.W)
val OP1_PC = 2.U(2.W)
val OP1_X = BitPat("b??")
// RS2 Operand Select Signal
val OP2_RS2 = 0.U(3.W) // Register Source #2
val OP2_IMM = 1.U(3.W) // immediate
val OP2_ZERO= 2.U(3.W) // constant 0
val OP2_NEXT= 3.U(3.W) // constant 2/4 (for PC+2/4)
val OP2_IMMC= 4.U(3.W) // for CSR imm found in RS1
val OP2_X = BitPat("b???")
// Register File Write Enable Signal
val REN_0 = false.B
val REN_1 = true.B
  // Is 32b Word or 64b Doubleword?
val SZ_DW = 1
val DW_X = true.B // Bool(xLen==64)
val DW_32 = false.B
val DW_64 = true.B
val DW_XPR = true.B // Bool(xLen==64)
// Memory Enable Signal
val MEN_0 = false.B
val MEN_1 = true.B
val MEN_X = false.B
// Immediate Extend Select
val IS_I = 0.U(3.W) // I-Type (LD,ALU)
val IS_S = 1.U(3.W) // S-Type (ST)
val IS_B = 2.U(3.W) // SB-Type (BR)
val IS_U = 3.U(3.W) // U-Type (LUI/AUIPC)
val IS_J = 4.U(3.W) // UJ-Type (J/JAL)
val IS_X = BitPat("b???")
// Decode Stage Control Signals
val RT_FIX = 0.U(2.W)
val RT_FLT = 1.U(2.W)
val RT_PAS = 3.U(2.W) // pass-through (prs1 := lrs1, etc)
val RT_X = 2.U(2.W) // not-a-register (but shouldn't get a busy-bit, etc.)
// TODO rename RT_NAR
// Micro-op opcodes
// TODO change micro-op opcodes into using enum
val UOPC_SZ = 7
val uopX = BitPat.dontCare(UOPC_SZ)
val uopNOP = 0.U(UOPC_SZ.W)
val uopLD = 1.U(UOPC_SZ.W)
val uopSTA = 2.U(UOPC_SZ.W) // store address generation
val uopSTD = 3.U(UOPC_SZ.W) // store data generation
val uopLUI = 4.U(UOPC_SZ.W)
val uopADDI = 5.U(UOPC_SZ.W)
val uopANDI = 6.U(UOPC_SZ.W)
val uopORI = 7.U(UOPC_SZ.W)
val uopXORI = 8.U(UOPC_SZ.W)
val uopSLTI = 9.U(UOPC_SZ.W)
val uopSLTIU= 10.U(UOPC_SZ.W)
val uopSLLI = 11.U(UOPC_SZ.W)
val uopSRAI = 12.U(UOPC_SZ.W)
val uopSRLI = 13.U(UOPC_SZ.W)
val uopSLL = 14.U(UOPC_SZ.W)
val uopADD = 15.U(UOPC_SZ.W)
val uopSUB = 16.U(UOPC_SZ.W)
val uopSLT = 17.U(UOPC_SZ.W)
val uopSLTU = 18.U(UOPC_SZ.W)
val uopAND = 19.U(UOPC_SZ.W)
val uopOR = 20.U(UOPC_SZ.W)
val uopXOR = 21.U(UOPC_SZ.W)
val uopSRA = 22.U(UOPC_SZ.W)
val uopSRL = 23.U(UOPC_SZ.W)
val uopBEQ = 24.U(UOPC_SZ.W)
val uopBNE = 25.U(UOPC_SZ.W)
val uopBGE = 26.U(UOPC_SZ.W)
val uopBGEU = 27.U(UOPC_SZ.W)
val uopBLT = 28.U(UOPC_SZ.W)
val uopBLTU = 29.U(UOPC_SZ.W)
val uopCSRRW= 30.U(UOPC_SZ.W)
val uopCSRRS= 31.U(UOPC_SZ.W)
val uopCSRRC= 32.U(UOPC_SZ.W)
val uopCSRRWI=33.U(UOPC_SZ.W)
val uopCSRRSI=34.U(UOPC_SZ.W)
val uopCSRRCI=35.U(UOPC_SZ.W)
val uopJ = 36.U(UOPC_SZ.W)
val uopJAL = 37.U(UOPC_SZ.W)
val uopJALR = 38.U(UOPC_SZ.W)
val uopAUIPC= 39.U(UOPC_SZ.W)
//val uopSRET = 40.U(UOPC_SZ.W)
val uopCFLSH= 41.U(UOPC_SZ.W)
val uopFENCE= 42.U(UOPC_SZ.W)
val uopADDIW= 43.U(UOPC_SZ.W)
val uopADDW = 44.U(UOPC_SZ.W)
val uopSUBW = 45.U(UOPC_SZ.W)
val uopSLLIW= 46.U(UOPC_SZ.W)
val uopSLLW = 47.U(UOPC_SZ.W)
val uopSRAIW= 48.U(UOPC_SZ.W)
val uopSRAW = 49.U(UOPC_SZ.W)
val uopSRLIW= 50.U(UOPC_SZ.W)
val uopSRLW = 51.U(UOPC_SZ.W)
val uopMUL = 52.U(UOPC_SZ.W)
val uopMULH = 53.U(UOPC_SZ.W)
val uopMULHU= 54.U(UOPC_SZ.W)
val uopMULHSU=55.U(UOPC_SZ.W)
val uopMULW = 56.U(UOPC_SZ.W)
val uopDIV = 57.U(UOPC_SZ.W)
val uopDIVU = 58.U(UOPC_SZ.W)
val uopREM = 59.U(UOPC_SZ.W)
val uopREMU = 60.U(UOPC_SZ.W)
val uopDIVW = 61.U(UOPC_SZ.W)
val uopDIVUW= 62.U(UOPC_SZ.W)
val uopREMW = 63.U(UOPC_SZ.W)
val uopREMUW= 64.U(UOPC_SZ.W)
val uopFENCEI = 65.U(UOPC_SZ.W)
// = 66.U(UOPC_SZ.W)
val uopAMO_AG = 67.U(UOPC_SZ.W) // AMO-address gen (use normal STD for datagen)
val uopFMV_W_X = 68.U(UOPC_SZ.W)
val uopFMV_D_X = 69.U(UOPC_SZ.W)
val uopFMV_X_W = 70.U(UOPC_SZ.W)
val uopFMV_X_D = 71.U(UOPC_SZ.W)
val uopFSGNJ_S = 72.U(UOPC_SZ.W)
val uopFSGNJ_D = 73.U(UOPC_SZ.W)
val uopFCVT_S_D = 74.U(UOPC_SZ.W)
val uopFCVT_D_S = 75.U(UOPC_SZ.W)
val uopFCVT_S_X = 76.U(UOPC_SZ.W)
val uopFCVT_D_X = 77.U(UOPC_SZ.W)
val uopFCVT_X_S = 78.U(UOPC_SZ.W)
val uopFCVT_X_D = 79.U(UOPC_SZ.W)
val uopCMPR_S = 80.U(UOPC_SZ.W)
val uopCMPR_D = 81.U(UOPC_SZ.W)
val uopFCLASS_S = 82.U(UOPC_SZ.W)
val uopFCLASS_D = 83.U(UOPC_SZ.W)
val uopFMINMAX_S = 84.U(UOPC_SZ.W)
val uopFMINMAX_D = 85.U(UOPC_SZ.W)
// = 86.U(UOPC_SZ.W)
val uopFADD_S = 87.U(UOPC_SZ.W)
val uopFSUB_S = 88.U(UOPC_SZ.W)
val uopFMUL_S = 89.U(UOPC_SZ.W)
val uopFADD_D = 90.U(UOPC_SZ.W)
val uopFSUB_D = 91.U(UOPC_SZ.W)
val uopFMUL_D = 92.U(UOPC_SZ.W)
val uopFMADD_S = 93.U(UOPC_SZ.W)
val uopFMSUB_S = 94.U(UOPC_SZ.W)
val uopFNMADD_S = 95.U(UOPC_SZ.W)
val uopFNMSUB_S = 96.U(UOPC_SZ.W)
val uopFMADD_D = 97.U(UOPC_SZ.W)
val uopFMSUB_D = 98.U(UOPC_SZ.W)
val uopFNMADD_D = 99.U(UOPC_SZ.W)
val uopFNMSUB_D = 100.U(UOPC_SZ.W)
val uopFDIV_S = 101.U(UOPC_SZ.W)
val uopFDIV_D = 102.U(UOPC_SZ.W)
val uopFSQRT_S = 103.U(UOPC_SZ.W)
val uopFSQRT_D = 104.U(UOPC_SZ.W)
val uopWFI = 105.U(UOPC_SZ.W) // pass uop down the CSR pipeline
val uopERET = 106.U(UOPC_SZ.W) // pass uop down the CSR pipeline, also is ERET
val uopSFENCE = 107.U(UOPC_SZ.W)
val uopROCC = 108.U(UOPC_SZ.W)
val uopMOV = 109.U(UOPC_SZ.W) // conditional mov decoded from "add rd, x0, rs2"
// The Bubble Instruction (Machine generated NOP)
// Insert (XOR x0,x0,x0) which is different from software compiler
// generated NOPs which are (ADDI x0, x0, 0).
// Reasoning for this is to let visualizers and stat-trackers differentiate
// between software NOPs and machine-generated Bubbles in the pipeline.
val BUBBLE = (0x4033).U(32.W)
def NullMicroOp()(implicit p: Parameters): boom.v3.common.MicroOp = {
val uop = Wire(new boom.v3.common.MicroOp)
uop := DontCare // Overridden in the following lines
uop.uopc := uopNOP // maybe not required, but helps on asserts that try to catch spurious behavior
uop.bypassable := false.B
uop.fp_val := false.B
uop.uses_stq := false.B
uop.uses_ldq := false.B
uop.pdst := 0.U
uop.dst_rtype := RT_X
val cs = Wire(new boom.v3.common.CtrlSignals())
cs := DontCare // Overridden in the following lines
cs.br_type := BR_N
cs.csr_cmd := freechips.rocketchip.rocket.CSR.N
cs.is_load := false.B
cs.is_sta := false.B
cs.is_std := false.B
uop.ctrl := cs
uop
}
}
/**
* Mixin for RISCV constants
*/
trait RISCVConstants
{
// abstract out instruction decode magic numbers
val RD_MSB = 11
val RD_LSB = 7
val RS1_MSB = 19
val RS1_LSB = 15
val RS2_MSB = 24
val RS2_LSB = 20
val RS3_MSB = 31
val RS3_LSB = 27
val CSR_ADDR_MSB = 31
val CSR_ADDR_LSB = 20
val CSR_ADDR_SZ = 12
// location of the fifth bit in the shamt (for checking for illegal ops for SRAIW,etc.)
val SHAMT_5_BIT = 25
val LONGEST_IMM_SZ = 20
val X0 = 0.U
val RA = 1.U // return address register
// memory consistency model
// The C/C++ atomics MCM requires that two loads to the same address maintain program order.
// The Cortex A9 does NOT enforce load/load ordering (which leads to buggy behavior).
val MCM_ORDER_DEPENDENT_LOADS = true
val jal_opc = (0x6f).U
val jalr_opc = (0x67).U
def GetUop(inst: UInt): UInt = inst(6,0)
def GetRd (inst: UInt): UInt = inst(RD_MSB,RD_LSB)
def GetRs1(inst: UInt): UInt = inst(RS1_MSB,RS1_LSB)
def ExpandRVC(inst: UInt)(implicit p: Parameters): UInt = {
val rvc_exp = Module(new RVCExpander)
rvc_exp.io.in := inst
Mux(rvc_exp.io.rvc, rvc_exp.io.out.bits, inst)
}
// Note: Accepts only EXPANDED rvc instructions
def ComputeBranchTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = {
val b_imm32 = Cat(Fill(20,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
((pc.asSInt + b_imm32.asSInt).asSInt & (-2).S).asUInt
}
// Note: Accepts only EXPANDED rvc instructions
def ComputeJALTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = {
val j_imm32 = Cat(Fill(12,inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
((pc.asSInt + j_imm32.asSInt).asSInt & (-2).S).asUInt
}
// Note: Accepts only EXPANDED rvc instructions
def GetCfiType(inst: UInt)(implicit p: Parameters): UInt = {
val bdecode = Module(new boom.v3.exu.BranchDecode)
bdecode.io.inst := inst
bdecode.io.pc := 0.U
bdecode.io.out.cfi_type
}
}
/**
* Mixin for exception cause constants
*/
trait ExcCauseConstants
{
  // a memory disambiguation misspeculation occurred
val MINI_EXCEPTION_MEM_ORDERING = 16.U
val MINI_EXCEPTION_CSR_REPLAY = 17.U
require (!freechips.rocketchip.rocket.Causes.all.contains(16))
require (!freechips.rocketchip.rocket.Causes.all.contains(17))
}
File issue-slot.scala:
//******************************************************************************
// Copyright (c) 2015 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// RISCV Processor Issue Slot Logic
//--------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// Note: stores (and AMOs) are "broken down" into 2 uops, but stored within a single issue-slot.
// TODO XXX make a separate issueSlot for MemoryIssueSlots, and only they break apart stores.
// TODO Disable ldspec for FP queue.
package boom.v3.exu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import boom.v3.common._
import boom.v3.util._
import FUConstants._
/**
* IO bundle to interact with Issue slot
*
* @param numWakeupPorts number of wakeup ports for the slot
*/
class IssueSlotIO(val numWakeupPorts: Int)(implicit p: Parameters) extends BoomBundle
{
val valid = Output(Bool())
  val will_be_valid = Output(Bool()) // TODO code review, do we need this signal so explicitly?
val request = Output(Bool())
val request_hp = Output(Bool())
val grant = Input(Bool())
val brupdate = Input(new BrUpdateInfo())
val kill = Input(Bool()) // pipeline flush
val clear = Input(Bool()) // entry being moved elsewhere (not mutually exclusive with grant)
val ldspec_miss = Input(Bool()) // Previous cycle's speculative load wakeup was mispredicted.
val wakeup_ports = Flipped(Vec(numWakeupPorts, Valid(new IqWakeup(maxPregSz))))
val pred_wakeup_port = Flipped(Valid(UInt(log2Ceil(ftqSz).W)))
val spec_ld_wakeup = Flipped(Vec(memWidth, Valid(UInt(width=maxPregSz.W))))
val in_uop = Flipped(Valid(new MicroOp())) // if valid, this WILL overwrite an entry!
  val out_uop = Output(new MicroOp()) // the updated slot uop; will be shifted upwards in a collapsing queue.
val uop = Output(new MicroOp()) // the current Slot's uop. Sent down the pipeline when issued.
val debug = {
val result = new Bundle {
val p1 = Bool()
val p2 = Bool()
val p3 = Bool()
val ppred = Bool()
val state = UInt(width=2.W)
}
Output(result)
}
}
/**
* Single issue slot. Holds a uop within the issue queue
*
* @param numWakeupPorts number of wakeup ports
*/
class IssueSlot(val numWakeupPorts: Int)(implicit p: Parameters)
extends BoomModule
with IssueUnitConstants
{
val io = IO(new IssueSlotIO(numWakeupPorts))
// slot invalid?
// slot is valid, holding 1 uop
// slot is valid, holds 2 uops (like a store)
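  // (Illustrative note, not in the original source: a store occupies s_valid_2 with
  // its STA and STD halves; if only the address operand is ready the STA half issues,
  // if only the data operand is ready the STD half issues, and the slot then drops
  // to s_valid_1 holding the remaining half.)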
def is_invalid = state === s_invalid
def is_valid = state =/= s_invalid
val next_state = Wire(UInt()) // the next state of this slot (which might then get moved to a new slot)
val next_uopc = Wire(UInt()) // the next uopc of this slot (which might then get moved to a new slot)
val next_lrs1_rtype = Wire(UInt()) // the next reg type of this slot (which might then get moved to a new slot)
val next_lrs2_rtype = Wire(UInt()) // the next reg type of this slot (which might then get moved to a new slot)
val state = RegInit(s_invalid)
val p1 = RegInit(false.B)
val p2 = RegInit(false.B)
val p3 = RegInit(false.B)
val ppred = RegInit(false.B)
// Poison if woken up by speculative load.
// Poison lasts 1 cycle (as ldMiss will come on the next cycle).
// SO if poisoned is true, set it to false!
val p1_poisoned = RegInit(false.B)
val p2_poisoned = RegInit(false.B)
p1_poisoned := false.B
p2_poisoned := false.B
val next_p1_poisoned = Mux(io.in_uop.valid, io.in_uop.bits.iw_p1_poisoned, p1_poisoned)
val next_p2_poisoned = Mux(io.in_uop.valid, io.in_uop.bits.iw_p2_poisoned, p2_poisoned)
val slot_uop = RegInit(NullMicroOp)
val next_uop = Mux(io.in_uop.valid, io.in_uop.bits, slot_uop)
//-----------------------------------------------------------------------------
// next slot state computation
  // compute the next state for THIS entry slot (in a collapsing queue, the
  // current uop may get moved elsewhere, and a new uop can enter).
when (io.kill) {
state := s_invalid
} .elsewhen (io.in_uop.valid) {
state := io.in_uop.bits.iw_state
} .elsewhen (io.clear) {
state := s_invalid
} .otherwise {
state := next_state
}
//-----------------------------------------------------------------------------
// "update" state
// compute the next state for the micro-op in this slot. This micro-op may
// be moved elsewhere, so the "next_state" travels with it.
// defaults
next_state := state
next_uopc := slot_uop.uopc
next_lrs1_rtype := slot_uop.lrs1_rtype
next_lrs2_rtype := slot_uop.lrs2_rtype
when (io.kill) {
next_state := s_invalid
} .elsewhen ((io.grant && (state === s_valid_1)) ||
(io.grant && (state === s_valid_2) && p1 && p2 && ppred)) {
// try to issue this uop.
when (!(io.ldspec_miss && (p1_poisoned || p2_poisoned))) {
next_state := s_invalid
}
} .elsewhen (io.grant && (state === s_valid_2)) {
when (!(io.ldspec_miss && (p1_poisoned || p2_poisoned))) {
next_state := s_valid_1
when (p1) {
slot_uop.uopc := uopSTD
next_uopc := uopSTD
slot_uop.lrs1_rtype := RT_X
next_lrs1_rtype := RT_X
} .otherwise {
slot_uop.lrs2_rtype := RT_X
next_lrs2_rtype := RT_X
}
}
}
when (io.in_uop.valid) {
slot_uop := io.in_uop.bits
assert (is_invalid || io.clear || io.kill, "trying to overwrite a valid issue slot.")
}
// Wakeup Compare Logic
// these signals are the "next_p*" for the current slot's micro-op.
  // they are important for shifting the current slot_uop up to another entry.
val next_p1 = WireInit(p1)
val next_p2 = WireInit(p2)
val next_p3 = WireInit(p3)
val next_ppred = WireInit(ppred)
when (io.in_uop.valid) {
p1 := !(io.in_uop.bits.prs1_busy)
p2 := !(io.in_uop.bits.prs2_busy)
p3 := !(io.in_uop.bits.prs3_busy)
ppred := !(io.in_uop.bits.ppred_busy)
}
when (io.ldspec_miss && next_p1_poisoned) {
assert(next_uop.prs1 =/= 0.U, "Poison bit can't be set for prs1=x0!")
p1 := false.B
}
when (io.ldspec_miss && next_p2_poisoned) {
assert(next_uop.prs2 =/= 0.U, "Poison bit can't be set for prs2=x0!")
p2 := false.B
}
for (i <- 0 until numWakeupPorts) {
when (io.wakeup_ports(i).valid &&
(io.wakeup_ports(i).bits.pdst === next_uop.prs1)) {
p1 := true.B
}
when (io.wakeup_ports(i).valid &&
(io.wakeup_ports(i).bits.pdst === next_uop.prs2)) {
p2 := true.B
}
when (io.wakeup_ports(i).valid &&
(io.wakeup_ports(i).bits.pdst === next_uop.prs3)) {
p3 := true.B
}
}
when (io.pred_wakeup_port.valid && io.pred_wakeup_port.bits === next_uop.ppred) {
ppred := true.B
}
for (w <- 0 until memWidth) {
assert (!(io.spec_ld_wakeup(w).valid && io.spec_ld_wakeup(w).bits === 0.U),
"Loads to x0 should never speculatively wakeup other instructions")
}
// TODO disable if FP IQ.
for (w <- 0 until memWidth) {
when (io.spec_ld_wakeup(w).valid &&
io.spec_ld_wakeup(w).bits === next_uop.prs1 &&
next_uop.lrs1_rtype === RT_FIX) {
p1 := true.B
p1_poisoned := true.B
assert (!next_p1_poisoned)
}
when (io.spec_ld_wakeup(w).valid &&
io.spec_ld_wakeup(w).bits === next_uop.prs2 &&
next_uop.lrs2_rtype === RT_FIX) {
p2 := true.B
p2_poisoned := true.B
assert (!next_p2_poisoned)
}
}
// Handle branch misspeculations
val next_br_mask = GetNewBrMask(io.brupdate, slot_uop)
// was this micro-op killed by a branch? if yes, we can't let it be valid if
  // we compact it into another entry
when (IsKilledByBranch(io.brupdate, slot_uop)) {
next_state := s_invalid
}
when (!io.in_uop.valid) {
slot_uop.br_mask := next_br_mask
}
//-------------------------------------------------------------
// Request Logic
io.request := is_valid && p1 && p2 && p3 && ppred && !io.kill
val high_priority = slot_uop.is_br || slot_uop.is_jal || slot_uop.is_jalr
io.request_hp := io.request && high_priority
when (state === s_valid_1) {
io.request := p1 && p2 && p3 && ppred && !io.kill
} .elsewhen (state === s_valid_2) {
io.request := (p1 || p2) && ppred && !io.kill
} .otherwise {
io.request := false.B
}
//assign outputs
io.valid := is_valid
io.uop := slot_uop
io.uop.iw_p1_poisoned := p1_poisoned
io.uop.iw_p2_poisoned := p2_poisoned
// micro-op will vacate due to grant.
val may_vacate = io.grant && ((state === s_valid_1) || (state === s_valid_2) && p1 && p2 && ppred)
val squash_grant = io.ldspec_miss && (p1_poisoned || p2_poisoned)
io.will_be_valid := is_valid && !(may_vacate && !squash_grant)
io.out_uop := slot_uop
io.out_uop.iw_state := next_state
io.out_uop.uopc := next_uopc
io.out_uop.lrs1_rtype := next_lrs1_rtype
io.out_uop.lrs2_rtype := next_lrs2_rtype
io.out_uop.br_mask := next_br_mask
io.out_uop.prs1_busy := !p1
io.out_uop.prs2_busy := !p2
io.out_uop.prs3_busy := !p3
io.out_uop.ppred_busy := !ppred
io.out_uop.iw_p1_poisoned := p1_poisoned
io.out_uop.iw_p2_poisoned := p2_poisoned
when (state === s_valid_2) {
when (p1 && p2 && ppred) {
; // send out the entire instruction as one uop
} .elsewhen (p1 && ppred) {
io.uop.uopc := slot_uop.uopc
io.uop.lrs2_rtype := RT_X
} .elsewhen (p2 && ppred) {
io.uop.uopc := uopSTD
io.uop.lrs1_rtype := RT_X
}
}
// debug outputs
io.debug.p1 := p1
io.debug.p2 := p2
io.debug.p3 := p3
io.debug.ppred := ppred
io.debug.state := state
}
| module IssueSlot_64( // @[issue-slot.scala:69:7]
input clock, // @[issue-slot.scala:69:7]
input reset, // @[issue-slot.scala:69:7]
output io_valid, // @[issue-slot.scala:73:14]
output io_will_be_valid, // @[issue-slot.scala:73:14]
output io_request, // @[issue-slot.scala:73:14]
output io_request_hp, // @[issue-slot.scala:73:14]
input io_grant, // @[issue-slot.scala:73:14]
input [15:0] io_brupdate_b1_resolve_mask, // @[issue-slot.scala:73:14]
input [15:0] io_brupdate_b1_mispredict_mask, // @[issue-slot.scala:73:14]
input [6:0] io_brupdate_b2_uop_uopc, // @[issue-slot.scala:73:14]
input [31:0] io_brupdate_b2_uop_inst, // @[issue-slot.scala:73:14]
input [31:0] io_brupdate_b2_uop_debug_inst, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_rvc, // @[issue-slot.scala:73:14]
input [39:0] io_brupdate_b2_uop_debug_pc, // @[issue-slot.scala:73:14]
input [2:0] io_brupdate_b2_uop_iq_type, // @[issue-slot.scala:73:14]
input [9:0] io_brupdate_b2_uop_fu_code, // @[issue-slot.scala:73:14]
input [3:0] io_brupdate_b2_uop_ctrl_br_type, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_ctrl_op1_sel, // @[issue-slot.scala:73:14]
input [2:0] io_brupdate_b2_uop_ctrl_op2_sel, // @[issue-slot.scala:73:14]
input [2:0] io_brupdate_b2_uop_ctrl_imm_sel, // @[issue-slot.scala:73:14]
input [4:0] io_brupdate_b2_uop_ctrl_op_fcn, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_ctrl_fcn_dw, // @[issue-slot.scala:73:14]
input [2:0] io_brupdate_b2_uop_ctrl_csr_cmd, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_ctrl_is_load, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_ctrl_is_sta, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_ctrl_is_std, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_iw_state, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_iw_p1_poisoned, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_iw_p2_poisoned, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_br, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_jalr, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_jal, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_sfb, // @[issue-slot.scala:73:14]
input [15:0] io_brupdate_b2_uop_br_mask, // @[issue-slot.scala:73:14]
input [3:0] io_brupdate_b2_uop_br_tag, // @[issue-slot.scala:73:14]
input [4:0] io_brupdate_b2_uop_ftq_idx, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_edge_inst, // @[issue-slot.scala:73:14]
input [5:0] io_brupdate_b2_uop_pc_lob, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_taken, // @[issue-slot.scala:73:14]
input [19:0] io_brupdate_b2_uop_imm_packed, // @[issue-slot.scala:73:14]
input [11:0] io_brupdate_b2_uop_csr_addr, // @[issue-slot.scala:73:14]
input [6:0] io_brupdate_b2_uop_rob_idx, // @[issue-slot.scala:73:14]
input [4:0] io_brupdate_b2_uop_ldq_idx, // @[issue-slot.scala:73:14]
input [4:0] io_brupdate_b2_uop_stq_idx, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_rxq_idx, // @[issue-slot.scala:73:14]
input [6:0] io_brupdate_b2_uop_pdst, // @[issue-slot.scala:73:14]
input [6:0] io_brupdate_b2_uop_prs1, // @[issue-slot.scala:73:14]
input [6:0] io_brupdate_b2_uop_prs2, // @[issue-slot.scala:73:14]
input [6:0] io_brupdate_b2_uop_prs3, // @[issue-slot.scala:73:14]
input [4:0] io_brupdate_b2_uop_ppred, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_prs1_busy, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_prs2_busy, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_prs3_busy, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_ppred_busy, // @[issue-slot.scala:73:14]
input [6:0] io_brupdate_b2_uop_stale_pdst, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_exception, // @[issue-slot.scala:73:14]
input [63:0] io_brupdate_b2_uop_exc_cause, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_bypassable, // @[issue-slot.scala:73:14]
input [4:0] io_brupdate_b2_uop_mem_cmd, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_mem_size, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_mem_signed, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_fence, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_fencei, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_amo, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_uses_ldq, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_uses_stq, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_sys_pc2epc, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_is_unique, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_flush_on_commit, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_ldst_is_rs1, // @[issue-slot.scala:73:14]
input [5:0] io_brupdate_b2_uop_ldst, // @[issue-slot.scala:73:14]
input [5:0] io_brupdate_b2_uop_lrs1, // @[issue-slot.scala:73:14]
input [5:0] io_brupdate_b2_uop_lrs2, // @[issue-slot.scala:73:14]
input [5:0] io_brupdate_b2_uop_lrs3, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_ldst_val, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_dst_rtype, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_frs3_en, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_fp_val, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_fp_single, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_xcpt_pf_if, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_xcpt_ae_if, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_xcpt_ma_if, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_bp_debug_if, // @[issue-slot.scala:73:14]
input io_brupdate_b2_uop_bp_xcpt_if, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_debug_fsrc, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_uop_debug_tsrc, // @[issue-slot.scala:73:14]
input io_brupdate_b2_valid, // @[issue-slot.scala:73:14]
input io_brupdate_b2_mispredict, // @[issue-slot.scala:73:14]
input io_brupdate_b2_taken, // @[issue-slot.scala:73:14]
input [2:0] io_brupdate_b2_cfi_type, // @[issue-slot.scala:73:14]
input [1:0] io_brupdate_b2_pc_sel, // @[issue-slot.scala:73:14]
input [39:0] io_brupdate_b2_jalr_target, // @[issue-slot.scala:73:14]
input [20:0] io_brupdate_b2_target_offset, // @[issue-slot.scala:73:14]
input io_kill, // @[issue-slot.scala:73:14]
input io_clear, // @[issue-slot.scala:73:14]
input io_ldspec_miss, // @[issue-slot.scala:73:14]
input io_wakeup_ports_0_valid, // @[issue-slot.scala:73:14]
input [6:0] io_wakeup_ports_0_bits_pdst, // @[issue-slot.scala:73:14]
input io_wakeup_ports_0_bits_poisoned, // @[issue-slot.scala:73:14]
input io_wakeup_ports_1_valid, // @[issue-slot.scala:73:14]
input [6:0] io_wakeup_ports_1_bits_pdst, // @[issue-slot.scala:73:14]
input io_wakeup_ports_1_bits_poisoned, // @[issue-slot.scala:73:14]
input io_wakeup_ports_2_valid, // @[issue-slot.scala:73:14]
input [6:0] io_wakeup_ports_2_bits_pdst, // @[issue-slot.scala:73:14]
input io_wakeup_ports_2_bits_poisoned, // @[issue-slot.scala:73:14]
input io_wakeup_ports_3_valid, // @[issue-slot.scala:73:14]
input [6:0] io_wakeup_ports_3_bits_pdst, // @[issue-slot.scala:73:14]
input io_wakeup_ports_3_bits_poisoned, // @[issue-slot.scala:73:14]
input io_wakeup_ports_4_valid, // @[issue-slot.scala:73:14]
input [6:0] io_wakeup_ports_4_bits_pdst, // @[issue-slot.scala:73:14]
input io_wakeup_ports_4_bits_poisoned, // @[issue-slot.scala:73:14]
input io_wakeup_ports_5_valid, // @[issue-slot.scala:73:14]
input [6:0] io_wakeup_ports_5_bits_pdst, // @[issue-slot.scala:73:14]
input io_wakeup_ports_5_bits_poisoned, // @[issue-slot.scala:73:14]
input io_wakeup_ports_6_valid, // @[issue-slot.scala:73:14]
input [6:0] io_wakeup_ports_6_bits_pdst, // @[issue-slot.scala:73:14]
input io_wakeup_ports_6_bits_poisoned, // @[issue-slot.scala:73:14]
input io_spec_ld_wakeup_0_valid, // @[issue-slot.scala:73:14]
input [6:0] io_spec_ld_wakeup_0_bits, // @[issue-slot.scala:73:14]
input io_in_uop_valid, // @[issue-slot.scala:73:14]
input [6:0] io_in_uop_bits_uopc, // @[issue-slot.scala:73:14]
input [31:0] io_in_uop_bits_inst, // @[issue-slot.scala:73:14]
input [31:0] io_in_uop_bits_debug_inst, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_rvc, // @[issue-slot.scala:73:14]
input [39:0] io_in_uop_bits_debug_pc, // @[issue-slot.scala:73:14]
input [2:0] io_in_uop_bits_iq_type, // @[issue-slot.scala:73:14]
input [9:0] io_in_uop_bits_fu_code, // @[issue-slot.scala:73:14]
input [3:0] io_in_uop_bits_ctrl_br_type, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_ctrl_op1_sel, // @[issue-slot.scala:73:14]
input [2:0] io_in_uop_bits_ctrl_op2_sel, // @[issue-slot.scala:73:14]
input [2:0] io_in_uop_bits_ctrl_imm_sel, // @[issue-slot.scala:73:14]
input [4:0] io_in_uop_bits_ctrl_op_fcn, // @[issue-slot.scala:73:14]
input io_in_uop_bits_ctrl_fcn_dw, // @[issue-slot.scala:73:14]
input [2:0] io_in_uop_bits_ctrl_csr_cmd, // @[issue-slot.scala:73:14]
input io_in_uop_bits_ctrl_is_load, // @[issue-slot.scala:73:14]
input io_in_uop_bits_ctrl_is_sta, // @[issue-slot.scala:73:14]
input io_in_uop_bits_ctrl_is_std, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_iw_state, // @[issue-slot.scala:73:14]
input io_in_uop_bits_iw_p1_poisoned, // @[issue-slot.scala:73:14]
input io_in_uop_bits_iw_p2_poisoned, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_br, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_jalr, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_jal, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_sfb, // @[issue-slot.scala:73:14]
input [15:0] io_in_uop_bits_br_mask, // @[issue-slot.scala:73:14]
input [3:0] io_in_uop_bits_br_tag, // @[issue-slot.scala:73:14]
input [4:0] io_in_uop_bits_ftq_idx, // @[issue-slot.scala:73:14]
input io_in_uop_bits_edge_inst, // @[issue-slot.scala:73:14]
input [5:0] io_in_uop_bits_pc_lob, // @[issue-slot.scala:73:14]
input io_in_uop_bits_taken, // @[issue-slot.scala:73:14]
input [19:0] io_in_uop_bits_imm_packed, // @[issue-slot.scala:73:14]
input [11:0] io_in_uop_bits_csr_addr, // @[issue-slot.scala:73:14]
input [6:0] io_in_uop_bits_rob_idx, // @[issue-slot.scala:73:14]
input [4:0] io_in_uop_bits_ldq_idx, // @[issue-slot.scala:73:14]
input [4:0] io_in_uop_bits_stq_idx, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_rxq_idx, // @[issue-slot.scala:73:14]
input [6:0] io_in_uop_bits_pdst, // @[issue-slot.scala:73:14]
input [6:0] io_in_uop_bits_prs1, // @[issue-slot.scala:73:14]
input [6:0] io_in_uop_bits_prs2, // @[issue-slot.scala:73:14]
input [6:0] io_in_uop_bits_prs3, // @[issue-slot.scala:73:14]
input [4:0] io_in_uop_bits_ppred, // @[issue-slot.scala:73:14]
input io_in_uop_bits_prs1_busy, // @[issue-slot.scala:73:14]
input io_in_uop_bits_prs2_busy, // @[issue-slot.scala:73:14]
input io_in_uop_bits_prs3_busy, // @[issue-slot.scala:73:14]
input io_in_uop_bits_ppred_busy, // @[issue-slot.scala:73:14]
input [6:0] io_in_uop_bits_stale_pdst, // @[issue-slot.scala:73:14]
input io_in_uop_bits_exception, // @[issue-slot.scala:73:14]
input [63:0] io_in_uop_bits_exc_cause, // @[issue-slot.scala:73:14]
input io_in_uop_bits_bypassable, // @[issue-slot.scala:73:14]
input [4:0] io_in_uop_bits_mem_cmd, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_mem_size, // @[issue-slot.scala:73:14]
input io_in_uop_bits_mem_signed, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_fence, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_fencei, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_amo, // @[issue-slot.scala:73:14]
input io_in_uop_bits_uses_ldq, // @[issue-slot.scala:73:14]
input io_in_uop_bits_uses_stq, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_sys_pc2epc, // @[issue-slot.scala:73:14]
input io_in_uop_bits_is_unique, // @[issue-slot.scala:73:14]
input io_in_uop_bits_flush_on_commit, // @[issue-slot.scala:73:14]
input io_in_uop_bits_ldst_is_rs1, // @[issue-slot.scala:73:14]
input [5:0] io_in_uop_bits_ldst, // @[issue-slot.scala:73:14]
input [5:0] io_in_uop_bits_lrs1, // @[issue-slot.scala:73:14]
input [5:0] io_in_uop_bits_lrs2, // @[issue-slot.scala:73:14]
input [5:0] io_in_uop_bits_lrs3, // @[issue-slot.scala:73:14]
input io_in_uop_bits_ldst_val, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_dst_rtype, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_lrs1_rtype, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_lrs2_rtype, // @[issue-slot.scala:73:14]
input io_in_uop_bits_frs3_en, // @[issue-slot.scala:73:14]
input io_in_uop_bits_fp_val, // @[issue-slot.scala:73:14]
input io_in_uop_bits_fp_single, // @[issue-slot.scala:73:14]
input io_in_uop_bits_xcpt_pf_if, // @[issue-slot.scala:73:14]
input io_in_uop_bits_xcpt_ae_if, // @[issue-slot.scala:73:14]
input io_in_uop_bits_xcpt_ma_if, // @[issue-slot.scala:73:14]
input io_in_uop_bits_bp_debug_if, // @[issue-slot.scala:73:14]
input io_in_uop_bits_bp_xcpt_if, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_debug_fsrc, // @[issue-slot.scala:73:14]
input [1:0] io_in_uop_bits_debug_tsrc, // @[issue-slot.scala:73:14]
output [6:0] io_out_uop_uopc, // @[issue-slot.scala:73:14]
output [31:0] io_out_uop_inst, // @[issue-slot.scala:73:14]
output [31:0] io_out_uop_debug_inst, // @[issue-slot.scala:73:14]
output io_out_uop_is_rvc, // @[issue-slot.scala:73:14]
output [39:0] io_out_uop_debug_pc, // @[issue-slot.scala:73:14]
output [2:0] io_out_uop_iq_type, // @[issue-slot.scala:73:14]
output [9:0] io_out_uop_fu_code, // @[issue-slot.scala:73:14]
output [3:0] io_out_uop_ctrl_br_type, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_ctrl_op1_sel, // @[issue-slot.scala:73:14]
output [2:0] io_out_uop_ctrl_op2_sel, // @[issue-slot.scala:73:14]
output [2:0] io_out_uop_ctrl_imm_sel, // @[issue-slot.scala:73:14]
output [4:0] io_out_uop_ctrl_op_fcn, // @[issue-slot.scala:73:14]
output io_out_uop_ctrl_fcn_dw, // @[issue-slot.scala:73:14]
output [2:0] io_out_uop_ctrl_csr_cmd, // @[issue-slot.scala:73:14]
output io_out_uop_ctrl_is_load, // @[issue-slot.scala:73:14]
output io_out_uop_ctrl_is_sta, // @[issue-slot.scala:73:14]
output io_out_uop_ctrl_is_std, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_iw_state, // @[issue-slot.scala:73:14]
output io_out_uop_iw_p1_poisoned, // @[issue-slot.scala:73:14]
output io_out_uop_iw_p2_poisoned, // @[issue-slot.scala:73:14]
output io_out_uop_is_br, // @[issue-slot.scala:73:14]
output io_out_uop_is_jalr, // @[issue-slot.scala:73:14]
output io_out_uop_is_jal, // @[issue-slot.scala:73:14]
output io_out_uop_is_sfb, // @[issue-slot.scala:73:14]
output [15:0] io_out_uop_br_mask, // @[issue-slot.scala:73:14]
output [3:0] io_out_uop_br_tag, // @[issue-slot.scala:73:14]
output [4:0] io_out_uop_ftq_idx, // @[issue-slot.scala:73:14]
output io_out_uop_edge_inst, // @[issue-slot.scala:73:14]
output [5:0] io_out_uop_pc_lob, // @[issue-slot.scala:73:14]
output io_out_uop_taken, // @[issue-slot.scala:73:14]
output [19:0] io_out_uop_imm_packed, // @[issue-slot.scala:73:14]
output [11:0] io_out_uop_csr_addr, // @[issue-slot.scala:73:14]
output [6:0] io_out_uop_rob_idx, // @[issue-slot.scala:73:14]
output [4:0] io_out_uop_ldq_idx, // @[issue-slot.scala:73:14]
output [4:0] io_out_uop_stq_idx, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_rxq_idx, // @[issue-slot.scala:73:14]
output [6:0] io_out_uop_pdst, // @[issue-slot.scala:73:14]
output [6:0] io_out_uop_prs1, // @[issue-slot.scala:73:14]
output [6:0] io_out_uop_prs2, // @[issue-slot.scala:73:14]
output [6:0] io_out_uop_prs3, // @[issue-slot.scala:73:14]
output [4:0] io_out_uop_ppred, // @[issue-slot.scala:73:14]
output io_out_uop_prs1_busy, // @[issue-slot.scala:73:14]
output io_out_uop_prs2_busy, // @[issue-slot.scala:73:14]
output io_out_uop_prs3_busy, // @[issue-slot.scala:73:14]
output io_out_uop_ppred_busy, // @[issue-slot.scala:73:14]
output [6:0] io_out_uop_stale_pdst, // @[issue-slot.scala:73:14]
output io_out_uop_exception, // @[issue-slot.scala:73:14]
output [63:0] io_out_uop_exc_cause, // @[issue-slot.scala:73:14]
output io_out_uop_bypassable, // @[issue-slot.scala:73:14]
output [4:0] io_out_uop_mem_cmd, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_mem_size, // @[issue-slot.scala:73:14]
output io_out_uop_mem_signed, // @[issue-slot.scala:73:14]
output io_out_uop_is_fence, // @[issue-slot.scala:73:14]
output io_out_uop_is_fencei, // @[issue-slot.scala:73:14]
output io_out_uop_is_amo, // @[issue-slot.scala:73:14]
output io_out_uop_uses_ldq, // @[issue-slot.scala:73:14]
output io_out_uop_uses_stq, // @[issue-slot.scala:73:14]
output io_out_uop_is_sys_pc2epc, // @[issue-slot.scala:73:14]
output io_out_uop_is_unique, // @[issue-slot.scala:73:14]
output io_out_uop_flush_on_commit, // @[issue-slot.scala:73:14]
output io_out_uop_ldst_is_rs1, // @[issue-slot.scala:73:14]
output [5:0] io_out_uop_ldst, // @[issue-slot.scala:73:14]
output [5:0] io_out_uop_lrs1, // @[issue-slot.scala:73:14]
output [5:0] io_out_uop_lrs2, // @[issue-slot.scala:73:14]
output [5:0] io_out_uop_lrs3, // @[issue-slot.scala:73:14]
output io_out_uop_ldst_val, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_dst_rtype, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_lrs1_rtype, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_lrs2_rtype, // @[issue-slot.scala:73:14]
output io_out_uop_frs3_en, // @[issue-slot.scala:73:14]
output io_out_uop_fp_val, // @[issue-slot.scala:73:14]
output io_out_uop_fp_single, // @[issue-slot.scala:73:14]
output io_out_uop_xcpt_pf_if, // @[issue-slot.scala:73:14]
output io_out_uop_xcpt_ae_if, // @[issue-slot.scala:73:14]
output io_out_uop_xcpt_ma_if, // @[issue-slot.scala:73:14]
output io_out_uop_bp_debug_if, // @[issue-slot.scala:73:14]
output io_out_uop_bp_xcpt_if, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_debug_fsrc, // @[issue-slot.scala:73:14]
output [1:0] io_out_uop_debug_tsrc, // @[issue-slot.scala:73:14]
output [6:0] io_uop_uopc, // @[issue-slot.scala:73:14]
output [31:0] io_uop_inst, // @[issue-slot.scala:73:14]
output [31:0] io_uop_debug_inst, // @[issue-slot.scala:73:14]
output io_uop_is_rvc, // @[issue-slot.scala:73:14]
output [39:0] io_uop_debug_pc, // @[issue-slot.scala:73:14]
output [2:0] io_uop_iq_type, // @[issue-slot.scala:73:14]
output [9:0] io_uop_fu_code, // @[issue-slot.scala:73:14]
output [3:0] io_uop_ctrl_br_type, // @[issue-slot.scala:73:14]
output [1:0] io_uop_ctrl_op1_sel, // @[issue-slot.scala:73:14]
output [2:0] io_uop_ctrl_op2_sel, // @[issue-slot.scala:73:14]
output [2:0] io_uop_ctrl_imm_sel, // @[issue-slot.scala:73:14]
output [4:0] io_uop_ctrl_op_fcn, // @[issue-slot.scala:73:14]
output io_uop_ctrl_fcn_dw, // @[issue-slot.scala:73:14]
output [2:0] io_uop_ctrl_csr_cmd, // @[issue-slot.scala:73:14]
output io_uop_ctrl_is_load, // @[issue-slot.scala:73:14]
output io_uop_ctrl_is_sta, // @[issue-slot.scala:73:14]
output io_uop_ctrl_is_std, // @[issue-slot.scala:73:14]
output [1:0] io_uop_iw_state, // @[issue-slot.scala:73:14]
output io_uop_iw_p1_poisoned, // @[issue-slot.scala:73:14]
output io_uop_iw_p2_poisoned, // @[issue-slot.scala:73:14]
output io_uop_is_br, // @[issue-slot.scala:73:14]
output io_uop_is_jalr, // @[issue-slot.scala:73:14]
output io_uop_is_jal, // @[issue-slot.scala:73:14]
output io_uop_is_sfb, // @[issue-slot.scala:73:14]
output [15:0] io_uop_br_mask, // @[issue-slot.scala:73:14]
output [3:0] io_uop_br_tag, // @[issue-slot.scala:73:14]
output [4:0] io_uop_ftq_idx, // @[issue-slot.scala:73:14]
output io_uop_edge_inst, // @[issue-slot.scala:73:14]
output [5:0] io_uop_pc_lob, // @[issue-slot.scala:73:14]
output io_uop_taken, // @[issue-slot.scala:73:14]
output [19:0] io_uop_imm_packed, // @[issue-slot.scala:73:14]
output [11:0] io_uop_csr_addr, // @[issue-slot.scala:73:14]
output [6:0] io_uop_rob_idx, // @[issue-slot.scala:73:14]
output [4:0] io_uop_ldq_idx, // @[issue-slot.scala:73:14]
output [4:0] io_uop_stq_idx, // @[issue-slot.scala:73:14]
output [1:0] io_uop_rxq_idx, // @[issue-slot.scala:73:14]
output [6:0] io_uop_pdst, // @[issue-slot.scala:73:14]
output [6:0] io_uop_prs1, // @[issue-slot.scala:73:14]
output [6:0] io_uop_prs2, // @[issue-slot.scala:73:14]
output [6:0] io_uop_prs3, // @[issue-slot.scala:73:14]
output [4:0] io_uop_ppred, // @[issue-slot.scala:73:14]
output io_uop_prs1_busy, // @[issue-slot.scala:73:14]
output io_uop_prs2_busy, // @[issue-slot.scala:73:14]
output io_uop_prs3_busy, // @[issue-slot.scala:73:14]
output io_uop_ppred_busy, // @[issue-slot.scala:73:14]
output [6:0] io_uop_stale_pdst, // @[issue-slot.scala:73:14]
output io_uop_exception, // @[issue-slot.scala:73:14]
output [63:0] io_uop_exc_cause, // @[issue-slot.scala:73:14]
output io_uop_bypassable, // @[issue-slot.scala:73:14]
output [4:0] io_uop_mem_cmd, // @[issue-slot.scala:73:14]
output [1:0] io_uop_mem_size, // @[issue-slot.scala:73:14]
output io_uop_mem_signed, // @[issue-slot.scala:73:14]
output io_uop_is_fence, // @[issue-slot.scala:73:14]
output io_uop_is_fencei, // @[issue-slot.scala:73:14]
output io_uop_is_amo, // @[issue-slot.scala:73:14]
output io_uop_uses_ldq, // @[issue-slot.scala:73:14]
output io_uop_uses_stq, // @[issue-slot.scala:73:14]
output io_uop_is_sys_pc2epc, // @[issue-slot.scala:73:14]
output io_uop_is_unique, // @[issue-slot.scala:73:14]
output io_uop_flush_on_commit, // @[issue-slot.scala:73:14]
output io_uop_ldst_is_rs1, // @[issue-slot.scala:73:14]
output [5:0] io_uop_ldst, // @[issue-slot.scala:73:14]
output [5:0] io_uop_lrs1, // @[issue-slot.scala:73:14]
output [5:0] io_uop_lrs2, // @[issue-slot.scala:73:14]
output [5:0] io_uop_lrs3, // @[issue-slot.scala:73:14]
output io_uop_ldst_val, // @[issue-slot.scala:73:14]
output [1:0] io_uop_dst_rtype, // @[issue-slot.scala:73:14]
output [1:0] io_uop_lrs1_rtype, // @[issue-slot.scala:73:14]
output [1:0] io_uop_lrs2_rtype, // @[issue-slot.scala:73:14]
output io_uop_frs3_en, // @[issue-slot.scala:73:14]
output io_uop_fp_val, // @[issue-slot.scala:73:14]
output io_uop_fp_single, // @[issue-slot.scala:73:14]
output io_uop_xcpt_pf_if, // @[issue-slot.scala:73:14]
output io_uop_xcpt_ae_if, // @[issue-slot.scala:73:14]
output io_uop_xcpt_ma_if, // @[issue-slot.scala:73:14]
output io_uop_bp_debug_if, // @[issue-slot.scala:73:14]
output io_uop_bp_xcpt_if, // @[issue-slot.scala:73:14]
output [1:0] io_uop_debug_fsrc, // @[issue-slot.scala:73:14]
output [1:0] io_uop_debug_tsrc, // @[issue-slot.scala:73:14]
output io_debug_p1, // @[issue-slot.scala:73:14]
output io_debug_p2, // @[issue-slot.scala:73:14]
output io_debug_p3, // @[issue-slot.scala:73:14]
output io_debug_ppred, // @[issue-slot.scala:73:14]
output [1:0] io_debug_state // @[issue-slot.scala:73:14]
);
wire io_grant_0 = io_grant; // @[issue-slot.scala:69:7]
wire [15:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[issue-slot.scala:69:7]
wire [15:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[issue-slot.scala:69:7]
wire [6:0] io_brupdate_b2_uop_uopc_0 = io_brupdate_b2_uop_uopc; // @[issue-slot.scala:69:7]
wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[issue-slot.scala:69:7]
wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[issue-slot.scala:69:7]
wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[issue-slot.scala:69:7]
wire [2:0] io_brupdate_b2_uop_iq_type_0 = io_brupdate_b2_uop_iq_type; // @[issue-slot.scala:69:7]
wire [9:0] io_brupdate_b2_uop_fu_code_0 = io_brupdate_b2_uop_fu_code; // @[issue-slot.scala:69:7]
wire [3:0] io_brupdate_b2_uop_ctrl_br_type_0 = io_brupdate_b2_uop_ctrl_br_type; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_ctrl_op1_sel_0 = io_brupdate_b2_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7]
wire [2:0] io_brupdate_b2_uop_ctrl_op2_sel_0 = io_brupdate_b2_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7]
wire [2:0] io_brupdate_b2_uop_ctrl_imm_sel_0 = io_brupdate_b2_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7]
wire [4:0] io_brupdate_b2_uop_ctrl_op_fcn_0 = io_brupdate_b2_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_ctrl_fcn_dw_0 = io_brupdate_b2_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7]
wire [2:0] io_brupdate_b2_uop_ctrl_csr_cmd_0 = io_brupdate_b2_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_ctrl_is_load_0 = io_brupdate_b2_uop_ctrl_is_load; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_ctrl_is_sta_0 = io_brupdate_b2_uop_ctrl_is_sta; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_ctrl_is_std_0 = io_brupdate_b2_uop_ctrl_is_std; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_iw_state_0 = io_brupdate_b2_uop_iw_state; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_iw_p1_poisoned_0 = io_brupdate_b2_uop_iw_p1_poisoned; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_iw_p2_poisoned_0 = io_brupdate_b2_uop_iw_p2_poisoned; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_br_0 = io_brupdate_b2_uop_is_br; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_jalr_0 = io_brupdate_b2_uop_is_jalr; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_jal_0 = io_brupdate_b2_uop_is_jal; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[issue-slot.scala:69:7]
wire [15:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[issue-slot.scala:69:7]
wire [3:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[issue-slot.scala:69:7]
wire [4:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[issue-slot.scala:69:7]
wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[issue-slot.scala:69:7]
wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[issue-slot.scala:69:7]
wire [11:0] io_brupdate_b2_uop_csr_addr_0 = io_brupdate_b2_uop_csr_addr; // @[issue-slot.scala:69:7]
wire [6:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[issue-slot.scala:69:7]
wire [4:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[issue-slot.scala:69:7]
wire [4:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[issue-slot.scala:69:7]
wire [6:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[issue-slot.scala:69:7]
wire [6:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[issue-slot.scala:69:7]
wire [6:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[issue-slot.scala:69:7]
wire [6:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[issue-slot.scala:69:7]
wire [4:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[issue-slot.scala:69:7]
wire [6:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[issue-slot.scala:69:7]
wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_bypassable_0 = io_brupdate_b2_uop_bypassable; // @[issue-slot.scala:69:7]
wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[issue-slot.scala:69:7]
wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[issue-slot.scala:69:7]
wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[issue-slot.scala:69:7]
wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[issue-slot.scala:69:7]
wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_ldst_val_0 = io_brupdate_b2_uop_ldst_val; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_fp_single_0 = io_brupdate_b2_uop_fp_single; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_valid_0 = io_brupdate_b2_valid; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[issue-slot.scala:69:7]
wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[issue-slot.scala:69:7]
wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[issue-slot.scala:69:7]
wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[issue-slot.scala:69:7]
wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[issue-slot.scala:69:7]
wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[issue-slot.scala:69:7]
wire io_kill_0 = io_kill; // @[issue-slot.scala:69:7]
wire io_clear_0 = io_clear; // @[issue-slot.scala:69:7]
wire io_ldspec_miss_0 = io_ldspec_miss; // @[issue-slot.scala:69:7]
wire io_wakeup_ports_0_valid_0 = io_wakeup_ports_0_valid; // @[issue-slot.scala:69:7]
wire [6:0] io_wakeup_ports_0_bits_pdst_0 = io_wakeup_ports_0_bits_pdst; // @[issue-slot.scala:69:7]
wire io_wakeup_ports_0_bits_poisoned_0 = io_wakeup_ports_0_bits_poisoned; // @[issue-slot.scala:69:7]
wire io_wakeup_ports_1_valid_0 = io_wakeup_ports_1_valid; // @[issue-slot.scala:69:7]
wire [6:0] io_wakeup_ports_1_bits_pdst_0 = io_wakeup_ports_1_bits_pdst; // @[issue-slot.scala:69:7]
wire io_wakeup_ports_1_bits_poisoned_0 = io_wakeup_ports_1_bits_poisoned; // @[issue-slot.scala:69:7]
wire io_wakeup_ports_2_valid_0 = io_wakeup_ports_2_valid; // @[issue-slot.scala:69:7]
wire [6:0] io_wakeup_ports_2_bits_pdst_0 = io_wakeup_ports_2_bits_pdst; // @[issue-slot.scala:69:7]
wire io_wakeup_ports_2_bits_poisoned_0 = io_wakeup_ports_2_bits_poisoned; // @[issue-slot.scala:69:7]
wire io_wakeup_ports_3_valid_0 = io_wakeup_ports_3_valid; // @[issue-slot.scala:69:7]
wire [6:0] io_wakeup_ports_3_bits_pdst_0 = io_wakeup_ports_3_bits_pdst; // @[issue-slot.scala:69:7]
wire io_wakeup_ports_3_bits_poisoned_0 = io_wakeup_ports_3_bits_poisoned; // @[issue-slot.scala:69:7]
wire io_wakeup_ports_4_valid_0 = io_wakeup_ports_4_valid; // @[issue-slot.scala:69:7]
wire [6:0] io_wakeup_ports_4_bits_pdst_0 = io_wakeup_ports_4_bits_pdst; // @[issue-slot.scala:69:7]
wire io_wakeup_ports_4_bits_poisoned_0 = io_wakeup_ports_4_bits_poisoned; // @[issue-slot.scala:69:7]
wire io_wakeup_ports_5_valid_0 = io_wakeup_ports_5_valid; // @[issue-slot.scala:69:7]
wire [6:0] io_wakeup_ports_5_bits_pdst_0 = io_wakeup_ports_5_bits_pdst; // @[issue-slot.scala:69:7]
wire io_wakeup_ports_5_bits_poisoned_0 = io_wakeup_ports_5_bits_poisoned; // @[issue-slot.scala:69:7]
wire io_wakeup_ports_6_valid_0 = io_wakeup_ports_6_valid; // @[issue-slot.scala:69:7]
wire [6:0] io_wakeup_ports_6_bits_pdst_0 = io_wakeup_ports_6_bits_pdst; // @[issue-slot.scala:69:7]
wire io_wakeup_ports_6_bits_poisoned_0 = io_wakeup_ports_6_bits_poisoned; // @[issue-slot.scala:69:7]
wire io_spec_ld_wakeup_0_valid_0 = io_spec_ld_wakeup_0_valid; // @[issue-slot.scala:69:7]
wire [6:0] io_spec_ld_wakeup_0_bits_0 = io_spec_ld_wakeup_0_bits; // @[issue-slot.scala:69:7]
wire io_in_uop_valid_0 = io_in_uop_valid; // @[issue-slot.scala:69:7]
wire [6:0] io_in_uop_bits_uopc_0 = io_in_uop_bits_uopc; // @[issue-slot.scala:69:7]
wire [31:0] io_in_uop_bits_inst_0 = io_in_uop_bits_inst; // @[issue-slot.scala:69:7]
wire [31:0] io_in_uop_bits_debug_inst_0 = io_in_uop_bits_debug_inst; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_rvc_0 = io_in_uop_bits_is_rvc; // @[issue-slot.scala:69:7]
wire [39:0] io_in_uop_bits_debug_pc_0 = io_in_uop_bits_debug_pc; // @[issue-slot.scala:69:7]
wire [2:0] io_in_uop_bits_iq_type_0 = io_in_uop_bits_iq_type; // @[issue-slot.scala:69:7]
wire [9:0] io_in_uop_bits_fu_code_0 = io_in_uop_bits_fu_code; // @[issue-slot.scala:69:7]
wire [3:0] io_in_uop_bits_ctrl_br_type_0 = io_in_uop_bits_ctrl_br_type; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_ctrl_op1_sel_0 = io_in_uop_bits_ctrl_op1_sel; // @[issue-slot.scala:69:7]
wire [2:0] io_in_uop_bits_ctrl_op2_sel_0 = io_in_uop_bits_ctrl_op2_sel; // @[issue-slot.scala:69:7]
wire [2:0] io_in_uop_bits_ctrl_imm_sel_0 = io_in_uop_bits_ctrl_imm_sel; // @[issue-slot.scala:69:7]
wire [4:0] io_in_uop_bits_ctrl_op_fcn_0 = io_in_uop_bits_ctrl_op_fcn; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_ctrl_fcn_dw_0 = io_in_uop_bits_ctrl_fcn_dw; // @[issue-slot.scala:69:7]
wire [2:0] io_in_uop_bits_ctrl_csr_cmd_0 = io_in_uop_bits_ctrl_csr_cmd; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_ctrl_is_load_0 = io_in_uop_bits_ctrl_is_load; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_ctrl_is_sta_0 = io_in_uop_bits_ctrl_is_sta; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_ctrl_is_std_0 = io_in_uop_bits_ctrl_is_std; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_iw_state_0 = io_in_uop_bits_iw_state; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_iw_p1_poisoned_0 = io_in_uop_bits_iw_p1_poisoned; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_iw_p2_poisoned_0 = io_in_uop_bits_iw_p2_poisoned; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_br_0 = io_in_uop_bits_is_br; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_jalr_0 = io_in_uop_bits_is_jalr; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_jal_0 = io_in_uop_bits_is_jal; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_sfb_0 = io_in_uop_bits_is_sfb; // @[issue-slot.scala:69:7]
wire [15:0] io_in_uop_bits_br_mask_0 = io_in_uop_bits_br_mask; // @[issue-slot.scala:69:7]
wire [3:0] io_in_uop_bits_br_tag_0 = io_in_uop_bits_br_tag; // @[issue-slot.scala:69:7]
wire [4:0] io_in_uop_bits_ftq_idx_0 = io_in_uop_bits_ftq_idx; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_edge_inst_0 = io_in_uop_bits_edge_inst; // @[issue-slot.scala:69:7]
wire [5:0] io_in_uop_bits_pc_lob_0 = io_in_uop_bits_pc_lob; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_taken_0 = io_in_uop_bits_taken; // @[issue-slot.scala:69:7]
wire [19:0] io_in_uop_bits_imm_packed_0 = io_in_uop_bits_imm_packed; // @[issue-slot.scala:69:7]
wire [11:0] io_in_uop_bits_csr_addr_0 = io_in_uop_bits_csr_addr; // @[issue-slot.scala:69:7]
wire [6:0] io_in_uop_bits_rob_idx_0 = io_in_uop_bits_rob_idx; // @[issue-slot.scala:69:7]
wire [4:0] io_in_uop_bits_ldq_idx_0 = io_in_uop_bits_ldq_idx; // @[issue-slot.scala:69:7]
wire [4:0] io_in_uop_bits_stq_idx_0 = io_in_uop_bits_stq_idx; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_rxq_idx_0 = io_in_uop_bits_rxq_idx; // @[issue-slot.scala:69:7]
wire [6:0] io_in_uop_bits_pdst_0 = io_in_uop_bits_pdst; // @[issue-slot.scala:69:7]
wire [6:0] io_in_uop_bits_prs1_0 = io_in_uop_bits_prs1; // @[issue-slot.scala:69:7]
wire [6:0] io_in_uop_bits_prs2_0 = io_in_uop_bits_prs2; // @[issue-slot.scala:69:7]
wire [6:0] io_in_uop_bits_prs3_0 = io_in_uop_bits_prs3; // @[issue-slot.scala:69:7]
wire [4:0] io_in_uop_bits_ppred_0 = io_in_uop_bits_ppred; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_prs1_busy_0 = io_in_uop_bits_prs1_busy; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_prs2_busy_0 = io_in_uop_bits_prs2_busy; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_prs3_busy_0 = io_in_uop_bits_prs3_busy; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_ppred_busy_0 = io_in_uop_bits_ppred_busy; // @[issue-slot.scala:69:7]
wire [6:0] io_in_uop_bits_stale_pdst_0 = io_in_uop_bits_stale_pdst; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_exception_0 = io_in_uop_bits_exception; // @[issue-slot.scala:69:7]
wire [63:0] io_in_uop_bits_exc_cause_0 = io_in_uop_bits_exc_cause; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_bypassable_0 = io_in_uop_bits_bypassable; // @[issue-slot.scala:69:7]
wire [4:0] io_in_uop_bits_mem_cmd_0 = io_in_uop_bits_mem_cmd; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_mem_size_0 = io_in_uop_bits_mem_size; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_mem_signed_0 = io_in_uop_bits_mem_signed; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_fence_0 = io_in_uop_bits_is_fence; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_fencei_0 = io_in_uop_bits_is_fencei; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_amo_0 = io_in_uop_bits_is_amo; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_uses_ldq_0 = io_in_uop_bits_uses_ldq; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_uses_stq_0 = io_in_uop_bits_uses_stq; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_sys_pc2epc_0 = io_in_uop_bits_is_sys_pc2epc; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_is_unique_0 = io_in_uop_bits_is_unique; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_flush_on_commit_0 = io_in_uop_bits_flush_on_commit; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_ldst_is_rs1_0 = io_in_uop_bits_ldst_is_rs1; // @[issue-slot.scala:69:7]
wire [5:0] io_in_uop_bits_ldst_0 = io_in_uop_bits_ldst; // @[issue-slot.scala:69:7]
wire [5:0] io_in_uop_bits_lrs1_0 = io_in_uop_bits_lrs1; // @[issue-slot.scala:69:7]
wire [5:0] io_in_uop_bits_lrs2_0 = io_in_uop_bits_lrs2; // @[issue-slot.scala:69:7]
wire [5:0] io_in_uop_bits_lrs3_0 = io_in_uop_bits_lrs3; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_ldst_val_0 = io_in_uop_bits_ldst_val; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_dst_rtype_0 = io_in_uop_bits_dst_rtype; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_lrs1_rtype_0 = io_in_uop_bits_lrs1_rtype; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_lrs2_rtype_0 = io_in_uop_bits_lrs2_rtype; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_frs3_en_0 = io_in_uop_bits_frs3_en; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_fp_val_0 = io_in_uop_bits_fp_val; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_fp_single_0 = io_in_uop_bits_fp_single; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_xcpt_pf_if_0 = io_in_uop_bits_xcpt_pf_if; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_xcpt_ae_if_0 = io_in_uop_bits_xcpt_ae_if; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_xcpt_ma_if_0 = io_in_uop_bits_xcpt_ma_if; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_bp_debug_if_0 = io_in_uop_bits_bp_debug_if; // @[issue-slot.scala:69:7]
wire io_in_uop_bits_bp_xcpt_if_0 = io_in_uop_bits_bp_xcpt_if; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_debug_fsrc_0 = io_in_uop_bits_debug_fsrc; // @[issue-slot.scala:69:7]
wire [1:0] io_in_uop_bits_debug_tsrc_0 = io_in_uop_bits_debug_tsrc; // @[issue-slot.scala:69:7]
wire io_pred_wakeup_port_valid = 1'h0; // @[issue-slot.scala:69:7]
wire slot_uop_uop_is_rvc = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_ctrl_fcn_dw = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_ctrl_is_load = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_ctrl_is_sta = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_ctrl_is_std = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_iw_p1_poisoned = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_iw_p2_poisoned = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_br = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_jalr = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_jal = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_sfb = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_edge_inst = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_taken = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_prs1_busy = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_prs2_busy = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_prs3_busy = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_ppred_busy = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_exception = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_bypassable = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_mem_signed = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_fence = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_fencei = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_amo = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_uses_ldq = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_uses_stq = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_sys_pc2epc = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_is_unique = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_flush_on_commit = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_ldst_is_rs1 = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_ldst_val = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_frs3_en = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_fp_val = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_fp_single = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_xcpt_pf_if = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_xcpt_ae_if = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_xcpt_ma_if = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_bp_debug_if = 1'h0; // @[consts.scala:269:19]
wire slot_uop_uop_bp_xcpt_if = 1'h0; // @[consts.scala:269:19]
wire slot_uop_cs_fcn_dw = 1'h0; // @[consts.scala:279:18]
wire slot_uop_cs_is_load = 1'h0; // @[consts.scala:279:18]
wire slot_uop_cs_is_sta = 1'h0; // @[consts.scala:279:18]
wire slot_uop_cs_is_std = 1'h0; // @[consts.scala:279:18]
wire [4:0] io_pred_wakeup_port_bits = 5'h0; // @[issue-slot.scala:69:7]
wire [4:0] slot_uop_uop_ctrl_op_fcn = 5'h0; // @[consts.scala:269:19]
wire [4:0] slot_uop_uop_ftq_idx = 5'h0; // @[consts.scala:269:19]
wire [4:0] slot_uop_uop_ldq_idx = 5'h0; // @[consts.scala:269:19]
wire [4:0] slot_uop_uop_stq_idx = 5'h0; // @[consts.scala:269:19]
wire [4:0] slot_uop_uop_ppred = 5'h0; // @[consts.scala:269:19]
wire [4:0] slot_uop_uop_mem_cmd = 5'h0; // @[consts.scala:269:19]
wire [4:0] slot_uop_cs_op_fcn = 5'h0; // @[consts.scala:279:18]
wire [2:0] slot_uop_uop_iq_type = 3'h0; // @[consts.scala:269:19]
wire [2:0] slot_uop_uop_ctrl_op2_sel = 3'h0; // @[consts.scala:269:19]
wire [2:0] slot_uop_uop_ctrl_imm_sel = 3'h0; // @[consts.scala:269:19]
wire [2:0] slot_uop_uop_ctrl_csr_cmd = 3'h0; // @[consts.scala:269:19]
wire [2:0] slot_uop_cs_op2_sel = 3'h0; // @[consts.scala:279:18]
wire [2:0] slot_uop_cs_imm_sel = 3'h0; // @[consts.scala:279:18]
wire [2:0] slot_uop_cs_csr_cmd = 3'h0; // @[consts.scala:279:18]
wire [1:0] slot_uop_uop_ctrl_op1_sel = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_uop_iw_state = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_uop_rxq_idx = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_uop_mem_size = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_uop_lrs1_rtype = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_uop_lrs2_rtype = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_uop_debug_fsrc = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_uop_debug_tsrc = 2'h0; // @[consts.scala:269:19]
wire [1:0] slot_uop_cs_op1_sel = 2'h0; // @[consts.scala:279:18]
wire [3:0] slot_uop_uop_ctrl_br_type = 4'h0; // @[consts.scala:269:19]
wire [3:0] slot_uop_uop_br_tag = 4'h0; // @[consts.scala:269:19]
wire [3:0] slot_uop_cs_br_type = 4'h0; // @[consts.scala:279:18]
wire [1:0] slot_uop_uop_dst_rtype = 2'h2; // @[consts.scala:269:19]
wire [5:0] slot_uop_uop_pc_lob = 6'h0; // @[consts.scala:269:19]
wire [5:0] slot_uop_uop_ldst = 6'h0; // @[consts.scala:269:19]
wire [5:0] slot_uop_uop_lrs1 = 6'h0; // @[consts.scala:269:19]
wire [5:0] slot_uop_uop_lrs2 = 6'h0; // @[consts.scala:269:19]
wire [5:0] slot_uop_uop_lrs3 = 6'h0; // @[consts.scala:269:19]
wire [63:0] slot_uop_uop_exc_cause = 64'h0; // @[consts.scala:269:19]
wire [6:0] slot_uop_uop_uopc = 7'h0; // @[consts.scala:269:19]
wire [6:0] slot_uop_uop_rob_idx = 7'h0; // @[consts.scala:269:19]
wire [6:0] slot_uop_uop_pdst = 7'h0; // @[consts.scala:269:19]
wire [6:0] slot_uop_uop_prs1 = 7'h0; // @[consts.scala:269:19]
wire [6:0] slot_uop_uop_prs2 = 7'h0; // @[consts.scala:269:19]
wire [6:0] slot_uop_uop_prs3 = 7'h0; // @[consts.scala:269:19]
wire [6:0] slot_uop_uop_stale_pdst = 7'h0; // @[consts.scala:269:19]
wire [11:0] slot_uop_uop_csr_addr = 12'h0; // @[consts.scala:269:19]
wire [19:0] slot_uop_uop_imm_packed = 20'h0; // @[consts.scala:269:19]
wire [15:0] slot_uop_uop_br_mask = 16'h0; // @[consts.scala:269:19]
wire [9:0] slot_uop_uop_fu_code = 10'h0; // @[consts.scala:269:19]
wire [39:0] slot_uop_uop_debug_pc = 40'h0; // @[consts.scala:269:19]
wire [31:0] slot_uop_uop_inst = 32'h0; // @[consts.scala:269:19]
wire [31:0] slot_uop_uop_debug_inst = 32'h0; // @[consts.scala:269:19]
wire _io_valid_T; // @[issue-slot.scala:79:24]
wire _io_will_be_valid_T_4; // @[issue-slot.scala:262:32]
wire _io_request_hp_T; // @[issue-slot.scala:243:31]
wire [6:0] next_uopc; // @[issue-slot.scala:82:29]
wire [1:0] next_state; // @[issue-slot.scala:81:29]
wire [15:0] next_br_mask; // @[util.scala:85:25]
wire _io_out_uop_prs1_busy_T; // @[issue-slot.scala:270:28]
wire _io_out_uop_prs2_busy_T; // @[issue-slot.scala:271:28]
wire _io_out_uop_prs3_busy_T; // @[issue-slot.scala:272:28]
wire _io_out_uop_ppred_busy_T; // @[issue-slot.scala:273:28]
wire [1:0] next_lrs1_rtype; // @[issue-slot.scala:83:29]
wire [1:0] next_lrs2_rtype; // @[issue-slot.scala:84:29]
wire [3:0] io_out_uop_ctrl_br_type_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_ctrl_op1_sel_0; // @[issue-slot.scala:69:7]
wire [2:0] io_out_uop_ctrl_op2_sel_0; // @[issue-slot.scala:69:7]
wire [2:0] io_out_uop_ctrl_imm_sel_0; // @[issue-slot.scala:69:7]
wire [4:0] io_out_uop_ctrl_op_fcn_0; // @[issue-slot.scala:69:7]
wire io_out_uop_ctrl_fcn_dw_0; // @[issue-slot.scala:69:7]
wire [2:0] io_out_uop_ctrl_csr_cmd_0; // @[issue-slot.scala:69:7]
wire io_out_uop_ctrl_is_load_0; // @[issue-slot.scala:69:7]
wire io_out_uop_ctrl_is_sta_0; // @[issue-slot.scala:69:7]
wire io_out_uop_ctrl_is_std_0; // @[issue-slot.scala:69:7]
wire [6:0] io_out_uop_uopc_0; // @[issue-slot.scala:69:7]
wire [31:0] io_out_uop_inst_0; // @[issue-slot.scala:69:7]
wire [31:0] io_out_uop_debug_inst_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_rvc_0; // @[issue-slot.scala:69:7]
wire [39:0] io_out_uop_debug_pc_0; // @[issue-slot.scala:69:7]
wire [2:0] io_out_uop_iq_type_0; // @[issue-slot.scala:69:7]
wire [9:0] io_out_uop_fu_code_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_iw_state_0; // @[issue-slot.scala:69:7]
wire io_out_uop_iw_p1_poisoned_0; // @[issue-slot.scala:69:7]
wire io_out_uop_iw_p2_poisoned_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_br_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_jalr_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_jal_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_sfb_0; // @[issue-slot.scala:69:7]
wire [15:0] io_out_uop_br_mask_0; // @[issue-slot.scala:69:7]
wire [3:0] io_out_uop_br_tag_0; // @[issue-slot.scala:69:7]
wire [4:0] io_out_uop_ftq_idx_0; // @[issue-slot.scala:69:7]
wire io_out_uop_edge_inst_0; // @[issue-slot.scala:69:7]
wire [5:0] io_out_uop_pc_lob_0; // @[issue-slot.scala:69:7]
wire io_out_uop_taken_0; // @[issue-slot.scala:69:7]
wire [19:0] io_out_uop_imm_packed_0; // @[issue-slot.scala:69:7]
wire [11:0] io_out_uop_csr_addr_0; // @[issue-slot.scala:69:7]
wire [6:0] io_out_uop_rob_idx_0; // @[issue-slot.scala:69:7]
wire [4:0] io_out_uop_ldq_idx_0; // @[issue-slot.scala:69:7]
wire [4:0] io_out_uop_stq_idx_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_rxq_idx_0; // @[issue-slot.scala:69:7]
wire [6:0] io_out_uop_pdst_0; // @[issue-slot.scala:69:7]
wire [6:0] io_out_uop_prs1_0; // @[issue-slot.scala:69:7]
wire [6:0] io_out_uop_prs2_0; // @[issue-slot.scala:69:7]
wire [6:0] io_out_uop_prs3_0; // @[issue-slot.scala:69:7]
wire [4:0] io_out_uop_ppred_0; // @[issue-slot.scala:69:7]
wire io_out_uop_prs1_busy_0; // @[issue-slot.scala:69:7]
wire io_out_uop_prs2_busy_0; // @[issue-slot.scala:69:7]
wire io_out_uop_prs3_busy_0; // @[issue-slot.scala:69:7]
wire io_out_uop_ppred_busy_0; // @[issue-slot.scala:69:7]
wire [6:0] io_out_uop_stale_pdst_0; // @[issue-slot.scala:69:7]
wire io_out_uop_exception_0; // @[issue-slot.scala:69:7]
wire [63:0] io_out_uop_exc_cause_0; // @[issue-slot.scala:69:7]
wire io_out_uop_bypassable_0; // @[issue-slot.scala:69:7]
wire [4:0] io_out_uop_mem_cmd_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_mem_size_0; // @[issue-slot.scala:69:7]
wire io_out_uop_mem_signed_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_fence_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_fencei_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_amo_0; // @[issue-slot.scala:69:7]
wire io_out_uop_uses_ldq_0; // @[issue-slot.scala:69:7]
wire io_out_uop_uses_stq_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_sys_pc2epc_0; // @[issue-slot.scala:69:7]
wire io_out_uop_is_unique_0; // @[issue-slot.scala:69:7]
wire io_out_uop_flush_on_commit_0; // @[issue-slot.scala:69:7]
wire io_out_uop_ldst_is_rs1_0; // @[issue-slot.scala:69:7]
wire [5:0] io_out_uop_ldst_0; // @[issue-slot.scala:69:7]
wire [5:0] io_out_uop_lrs1_0; // @[issue-slot.scala:69:7]
wire [5:0] io_out_uop_lrs2_0; // @[issue-slot.scala:69:7]
wire [5:0] io_out_uop_lrs3_0; // @[issue-slot.scala:69:7]
wire io_out_uop_ldst_val_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_dst_rtype_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_lrs1_rtype_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_lrs2_rtype_0; // @[issue-slot.scala:69:7]
wire io_out_uop_frs3_en_0; // @[issue-slot.scala:69:7]
wire io_out_uop_fp_val_0; // @[issue-slot.scala:69:7]
wire io_out_uop_fp_single_0; // @[issue-slot.scala:69:7]
wire io_out_uop_xcpt_pf_if_0; // @[issue-slot.scala:69:7]
wire io_out_uop_xcpt_ae_if_0; // @[issue-slot.scala:69:7]
wire io_out_uop_xcpt_ma_if_0; // @[issue-slot.scala:69:7]
wire io_out_uop_bp_debug_if_0; // @[issue-slot.scala:69:7]
wire io_out_uop_bp_xcpt_if_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_debug_fsrc_0; // @[issue-slot.scala:69:7]
wire [1:0] io_out_uop_debug_tsrc_0; // @[issue-slot.scala:69:7]
wire [3:0] io_uop_ctrl_br_type_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_ctrl_op1_sel_0; // @[issue-slot.scala:69:7]
wire [2:0] io_uop_ctrl_op2_sel_0; // @[issue-slot.scala:69:7]
wire [2:0] io_uop_ctrl_imm_sel_0; // @[issue-slot.scala:69:7]
wire [4:0] io_uop_ctrl_op_fcn_0; // @[issue-slot.scala:69:7]
wire io_uop_ctrl_fcn_dw_0; // @[issue-slot.scala:69:7]
wire [2:0] io_uop_ctrl_csr_cmd_0; // @[issue-slot.scala:69:7]
wire io_uop_ctrl_is_load_0; // @[issue-slot.scala:69:7]
wire io_uop_ctrl_is_sta_0; // @[issue-slot.scala:69:7]
wire io_uop_ctrl_is_std_0; // @[issue-slot.scala:69:7]
wire [6:0] io_uop_uopc_0; // @[issue-slot.scala:69:7]
wire [31:0] io_uop_inst_0; // @[issue-slot.scala:69:7]
wire [31:0] io_uop_debug_inst_0; // @[issue-slot.scala:69:7]
wire io_uop_is_rvc_0; // @[issue-slot.scala:69:7]
wire [39:0] io_uop_debug_pc_0; // @[issue-slot.scala:69:7]
wire [2:0] io_uop_iq_type_0; // @[issue-slot.scala:69:7]
wire [9:0] io_uop_fu_code_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_iw_state_0; // @[issue-slot.scala:69:7]
wire io_uop_iw_p1_poisoned_0; // @[issue-slot.scala:69:7]
wire io_uop_iw_p2_poisoned_0; // @[issue-slot.scala:69:7]
wire io_uop_is_br_0; // @[issue-slot.scala:69:7]
wire io_uop_is_jalr_0; // @[issue-slot.scala:69:7]
wire io_uop_is_jal_0; // @[issue-slot.scala:69:7]
wire io_uop_is_sfb_0; // @[issue-slot.scala:69:7]
wire [15:0] io_uop_br_mask_0; // @[issue-slot.scala:69:7]
wire [3:0] io_uop_br_tag_0; // @[issue-slot.scala:69:7]
wire [4:0] io_uop_ftq_idx_0; // @[issue-slot.scala:69:7]
wire io_uop_edge_inst_0; // @[issue-slot.scala:69:7]
wire [5:0] io_uop_pc_lob_0; // @[issue-slot.scala:69:7]
wire io_uop_taken_0; // @[issue-slot.scala:69:7]
wire [19:0] io_uop_imm_packed_0; // @[issue-slot.scala:69:7]
wire [11:0] io_uop_csr_addr_0; // @[issue-slot.scala:69:7]
wire [6:0] io_uop_rob_idx_0; // @[issue-slot.scala:69:7]
wire [4:0] io_uop_ldq_idx_0; // @[issue-slot.scala:69:7]
wire [4:0] io_uop_stq_idx_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_rxq_idx_0; // @[issue-slot.scala:69:7]
wire [6:0] io_uop_pdst_0; // @[issue-slot.scala:69:7]
wire [6:0] io_uop_prs1_0; // @[issue-slot.scala:69:7]
wire [6:0] io_uop_prs2_0; // @[issue-slot.scala:69:7]
wire [6:0] io_uop_prs3_0; // @[issue-slot.scala:69:7]
wire [4:0] io_uop_ppred_0; // @[issue-slot.scala:69:7]
wire io_uop_prs1_busy_0; // @[issue-slot.scala:69:7]
wire io_uop_prs2_busy_0; // @[issue-slot.scala:69:7]
wire io_uop_prs3_busy_0; // @[issue-slot.scala:69:7]
wire io_uop_ppred_busy_0; // @[issue-slot.scala:69:7]
wire [6:0] io_uop_stale_pdst_0; // @[issue-slot.scala:69:7]
wire io_uop_exception_0; // @[issue-slot.scala:69:7]
wire [63:0] io_uop_exc_cause_0; // @[issue-slot.scala:69:7]
wire io_uop_bypassable_0; // @[issue-slot.scala:69:7]
wire [4:0] io_uop_mem_cmd_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_mem_size_0; // @[issue-slot.scala:69:7]
wire io_uop_mem_signed_0; // @[issue-slot.scala:69:7]
wire io_uop_is_fence_0; // @[issue-slot.scala:69:7]
wire io_uop_is_fencei_0; // @[issue-slot.scala:69:7]
wire io_uop_is_amo_0; // @[issue-slot.scala:69:7]
wire io_uop_uses_ldq_0; // @[issue-slot.scala:69:7]
wire io_uop_uses_stq_0; // @[issue-slot.scala:69:7]
wire io_uop_is_sys_pc2epc_0; // @[issue-slot.scala:69:7]
wire io_uop_is_unique_0; // @[issue-slot.scala:69:7]
wire io_uop_flush_on_commit_0; // @[issue-slot.scala:69:7]
wire io_uop_ldst_is_rs1_0; // @[issue-slot.scala:69:7]
wire [5:0] io_uop_ldst_0; // @[issue-slot.scala:69:7]
wire [5:0] io_uop_lrs1_0; // @[issue-slot.scala:69:7]
wire [5:0] io_uop_lrs2_0; // @[issue-slot.scala:69:7]
wire [5:0] io_uop_lrs3_0; // @[issue-slot.scala:69:7]
wire io_uop_ldst_val_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_dst_rtype_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_lrs1_rtype_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_lrs2_rtype_0; // @[issue-slot.scala:69:7]
wire io_uop_frs3_en_0; // @[issue-slot.scala:69:7]
wire io_uop_fp_val_0; // @[issue-slot.scala:69:7]
wire io_uop_fp_single_0; // @[issue-slot.scala:69:7]
wire io_uop_xcpt_pf_if_0; // @[issue-slot.scala:69:7]
wire io_uop_xcpt_ae_if_0; // @[issue-slot.scala:69:7]
wire io_uop_xcpt_ma_if_0; // @[issue-slot.scala:69:7]
wire io_uop_bp_debug_if_0; // @[issue-slot.scala:69:7]
wire io_uop_bp_xcpt_if_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_debug_fsrc_0; // @[issue-slot.scala:69:7]
wire [1:0] io_uop_debug_tsrc_0; // @[issue-slot.scala:69:7]
wire io_debug_p1_0; // @[issue-slot.scala:69:7]
wire io_debug_p2_0; // @[issue-slot.scala:69:7]
wire io_debug_p3_0; // @[issue-slot.scala:69:7]
wire io_debug_ppred_0; // @[issue-slot.scala:69:7]
wire [1:0] io_debug_state_0; // @[issue-slot.scala:69:7]
wire io_valid_0; // @[issue-slot.scala:69:7]
wire io_will_be_valid_0; // @[issue-slot.scala:69:7]
wire io_request_0; // @[issue-slot.scala:69:7]
wire io_request_hp_0; // @[issue-slot.scala:69:7]
assign io_out_uop_iw_state_0 = next_state; // @[issue-slot.scala:69:7, :81:29]
assign io_out_uop_uopc_0 = next_uopc; // @[issue-slot.scala:69:7, :82:29]
assign io_out_uop_lrs1_rtype_0 = next_lrs1_rtype; // @[issue-slot.scala:69:7, :83:29]
assign io_out_uop_lrs2_rtype_0 = next_lrs2_rtype; // @[issue-slot.scala:69:7, :84:29]
reg [1:0] state; // @[issue-slot.scala:86:22]
assign io_debug_state_0 = state; // @[issue-slot.scala:69:7, :86:22]
reg p1; // @[issue-slot.scala:87:22]
assign io_debug_p1_0 = p1; // @[issue-slot.scala:69:7, :87:22]
wire next_p1 = p1; // @[issue-slot.scala:87:22, :163:25]
reg p2; // @[issue-slot.scala:88:22]
assign io_debug_p2_0 = p2; // @[issue-slot.scala:69:7, :88:22]
wire next_p2 = p2; // @[issue-slot.scala:88:22, :164:25]
reg p3; // @[issue-slot.scala:89:22]
assign io_debug_p3_0 = p3; // @[issue-slot.scala:69:7, :89:22]
wire next_p3 = p3; // @[issue-slot.scala:89:22, :165:25]
reg ppred; // @[issue-slot.scala:90:22]
assign io_debug_ppred_0 = ppred; // @[issue-slot.scala:69:7, :90:22]
wire next_ppred = ppred; // @[issue-slot.scala:90:22, :166:28]
reg p1_poisoned; // @[issue-slot.scala:95:28]
assign io_out_uop_iw_p1_poisoned_0 = p1_poisoned; // @[issue-slot.scala:69:7, :95:28]
assign io_uop_iw_p1_poisoned_0 = p1_poisoned; // @[issue-slot.scala:69:7, :95:28]
reg p2_poisoned; // @[issue-slot.scala:96:28]
assign io_out_uop_iw_p2_poisoned_0 = p2_poisoned; // @[issue-slot.scala:69:7, :96:28]
assign io_uop_iw_p2_poisoned_0 = p2_poisoned; // @[issue-slot.scala:69:7, :96:28]
wire next_p1_poisoned = io_in_uop_valid_0 ? io_in_uop_bits_iw_p1_poisoned_0 : p1_poisoned; // @[issue-slot.scala:69:7, :95:28, :99:29]
wire next_p2_poisoned = io_in_uop_valid_0 ? io_in_uop_bits_iw_p2_poisoned_0 : p2_poisoned; // @[issue-slot.scala:69:7, :96:28, :100:29]
reg [6:0] slot_uop_uopc; // @[issue-slot.scala:102:25]
reg [31:0] slot_uop_inst; // @[issue-slot.scala:102:25]
assign io_out_uop_inst_0 = slot_uop_inst; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_inst_0 = slot_uop_inst; // @[issue-slot.scala:69:7, :102:25]
reg [31:0] slot_uop_debug_inst; // @[issue-slot.scala:102:25]
assign io_out_uop_debug_inst_0 = slot_uop_debug_inst; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_debug_inst_0 = slot_uop_debug_inst; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_rvc; // @[issue-slot.scala:102:25]
assign io_out_uop_is_rvc_0 = slot_uop_is_rvc; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_rvc_0 = slot_uop_is_rvc; // @[issue-slot.scala:69:7, :102:25]
reg [39:0] slot_uop_debug_pc; // @[issue-slot.scala:102:25]
assign io_out_uop_debug_pc_0 = slot_uop_debug_pc; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_debug_pc_0 = slot_uop_debug_pc; // @[issue-slot.scala:69:7, :102:25]
reg [2:0] slot_uop_iq_type; // @[issue-slot.scala:102:25]
assign io_out_uop_iq_type_0 = slot_uop_iq_type; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_iq_type_0 = slot_uop_iq_type; // @[issue-slot.scala:69:7, :102:25]
reg [9:0] slot_uop_fu_code; // @[issue-slot.scala:102:25]
assign io_out_uop_fu_code_0 = slot_uop_fu_code; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_fu_code_0 = slot_uop_fu_code; // @[issue-slot.scala:69:7, :102:25]
reg [3:0] slot_uop_ctrl_br_type; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_br_type_0 = slot_uop_ctrl_br_type; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_br_type_0 = slot_uop_ctrl_br_type; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_ctrl_op1_sel; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_op1_sel_0 = slot_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_op1_sel_0 = slot_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7, :102:25]
reg [2:0] slot_uop_ctrl_op2_sel; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_op2_sel_0 = slot_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_op2_sel_0 = slot_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7, :102:25]
reg [2:0] slot_uop_ctrl_imm_sel; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_imm_sel_0 = slot_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_imm_sel_0 = slot_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7, :102:25]
reg [4:0] slot_uop_ctrl_op_fcn; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_op_fcn_0 = slot_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_op_fcn_0 = slot_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_fcn_dw_0 = slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_fcn_dw_0 = slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7, :102:25]
reg [2:0] slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_csr_cmd_0 = slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_csr_cmd_0 = slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_ctrl_is_load; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_is_load_0 = slot_uop_ctrl_is_load; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_is_load_0 = slot_uop_ctrl_is_load; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_ctrl_is_sta; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_is_sta_0 = slot_uop_ctrl_is_sta; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_is_sta_0 = slot_uop_ctrl_is_sta; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_ctrl_is_std; // @[issue-slot.scala:102:25]
assign io_out_uop_ctrl_is_std_0 = slot_uop_ctrl_is_std; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ctrl_is_std_0 = slot_uop_ctrl_is_std; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_iw_state; // @[issue-slot.scala:102:25]
assign io_uop_iw_state_0 = slot_uop_iw_state; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_iw_p1_poisoned; // @[issue-slot.scala:102:25]
reg slot_uop_iw_p2_poisoned; // @[issue-slot.scala:102:25]
reg slot_uop_is_br; // @[issue-slot.scala:102:25]
assign io_out_uop_is_br_0 = slot_uop_is_br; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_br_0 = slot_uop_is_br; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_jalr; // @[issue-slot.scala:102:25]
assign io_out_uop_is_jalr_0 = slot_uop_is_jalr; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_jalr_0 = slot_uop_is_jalr; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_jal; // @[issue-slot.scala:102:25]
assign io_out_uop_is_jal_0 = slot_uop_is_jal; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_jal_0 = slot_uop_is_jal; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_sfb; // @[issue-slot.scala:102:25]
assign io_out_uop_is_sfb_0 = slot_uop_is_sfb; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_sfb_0 = slot_uop_is_sfb; // @[issue-slot.scala:69:7, :102:25]
reg [15:0] slot_uop_br_mask; // @[issue-slot.scala:102:25]
assign io_uop_br_mask_0 = slot_uop_br_mask; // @[issue-slot.scala:69:7, :102:25]
reg [3:0] slot_uop_br_tag; // @[issue-slot.scala:102:25]
assign io_out_uop_br_tag_0 = slot_uop_br_tag; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_br_tag_0 = slot_uop_br_tag; // @[issue-slot.scala:69:7, :102:25]
reg [4:0] slot_uop_ftq_idx; // @[issue-slot.scala:102:25]
assign io_out_uop_ftq_idx_0 = slot_uop_ftq_idx; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ftq_idx_0 = slot_uop_ftq_idx; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_edge_inst; // @[issue-slot.scala:102:25]
assign io_out_uop_edge_inst_0 = slot_uop_edge_inst; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_edge_inst_0 = slot_uop_edge_inst; // @[issue-slot.scala:69:7, :102:25]
reg [5:0] slot_uop_pc_lob; // @[issue-slot.scala:102:25]
assign io_out_uop_pc_lob_0 = slot_uop_pc_lob; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_pc_lob_0 = slot_uop_pc_lob; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_taken; // @[issue-slot.scala:102:25]
assign io_out_uop_taken_0 = slot_uop_taken; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_taken_0 = slot_uop_taken; // @[issue-slot.scala:69:7, :102:25]
reg [19:0] slot_uop_imm_packed; // @[issue-slot.scala:102:25]
assign io_out_uop_imm_packed_0 = slot_uop_imm_packed; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_imm_packed_0 = slot_uop_imm_packed; // @[issue-slot.scala:69:7, :102:25]
reg [11:0] slot_uop_csr_addr; // @[issue-slot.scala:102:25]
assign io_out_uop_csr_addr_0 = slot_uop_csr_addr; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_csr_addr_0 = slot_uop_csr_addr; // @[issue-slot.scala:69:7, :102:25]
reg [6:0] slot_uop_rob_idx; // @[issue-slot.scala:102:25]
assign io_out_uop_rob_idx_0 = slot_uop_rob_idx; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_rob_idx_0 = slot_uop_rob_idx; // @[issue-slot.scala:69:7, :102:25]
reg [4:0] slot_uop_ldq_idx; // @[issue-slot.scala:102:25]
assign io_out_uop_ldq_idx_0 = slot_uop_ldq_idx; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ldq_idx_0 = slot_uop_ldq_idx; // @[issue-slot.scala:69:7, :102:25]
reg [4:0] slot_uop_stq_idx; // @[issue-slot.scala:102:25]
assign io_out_uop_stq_idx_0 = slot_uop_stq_idx; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_stq_idx_0 = slot_uop_stq_idx; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_rxq_idx; // @[issue-slot.scala:102:25]
assign io_out_uop_rxq_idx_0 = slot_uop_rxq_idx; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_rxq_idx_0 = slot_uop_rxq_idx; // @[issue-slot.scala:69:7, :102:25]
reg [6:0] slot_uop_pdst; // @[issue-slot.scala:102:25]
assign io_out_uop_pdst_0 = slot_uop_pdst; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_pdst_0 = slot_uop_pdst; // @[issue-slot.scala:69:7, :102:25]
reg [6:0] slot_uop_prs1; // @[issue-slot.scala:102:25]
assign io_out_uop_prs1_0 = slot_uop_prs1; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_prs1_0 = slot_uop_prs1; // @[issue-slot.scala:69:7, :102:25]
reg [6:0] slot_uop_prs2; // @[issue-slot.scala:102:25]
assign io_out_uop_prs2_0 = slot_uop_prs2; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_prs2_0 = slot_uop_prs2; // @[issue-slot.scala:69:7, :102:25]
reg [6:0] slot_uop_prs3; // @[issue-slot.scala:102:25]
assign io_out_uop_prs3_0 = slot_uop_prs3; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_prs3_0 = slot_uop_prs3; // @[issue-slot.scala:69:7, :102:25]
reg [4:0] slot_uop_ppred; // @[issue-slot.scala:102:25]
assign io_out_uop_ppred_0 = slot_uop_ppred; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ppred_0 = slot_uop_ppred; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_prs1_busy; // @[issue-slot.scala:102:25]
assign io_uop_prs1_busy_0 = slot_uop_prs1_busy; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_prs2_busy; // @[issue-slot.scala:102:25]
assign io_uop_prs2_busy_0 = slot_uop_prs2_busy; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_prs3_busy; // @[issue-slot.scala:102:25]
assign io_uop_prs3_busy_0 = slot_uop_prs3_busy; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_ppred_busy; // @[issue-slot.scala:102:25]
assign io_uop_ppred_busy_0 = slot_uop_ppred_busy; // @[issue-slot.scala:69:7, :102:25]
reg [6:0] slot_uop_stale_pdst; // @[issue-slot.scala:102:25]
assign io_out_uop_stale_pdst_0 = slot_uop_stale_pdst; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_stale_pdst_0 = slot_uop_stale_pdst; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_exception; // @[issue-slot.scala:102:25]
assign io_out_uop_exception_0 = slot_uop_exception; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_exception_0 = slot_uop_exception; // @[issue-slot.scala:69:7, :102:25]
reg [63:0] slot_uop_exc_cause; // @[issue-slot.scala:102:25]
assign io_out_uop_exc_cause_0 = slot_uop_exc_cause; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_exc_cause_0 = slot_uop_exc_cause; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_bypassable; // @[issue-slot.scala:102:25]
assign io_out_uop_bypassable_0 = slot_uop_bypassable; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_bypassable_0 = slot_uop_bypassable; // @[issue-slot.scala:69:7, :102:25]
reg [4:0] slot_uop_mem_cmd; // @[issue-slot.scala:102:25]
assign io_out_uop_mem_cmd_0 = slot_uop_mem_cmd; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_mem_cmd_0 = slot_uop_mem_cmd; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_mem_size; // @[issue-slot.scala:102:25]
assign io_out_uop_mem_size_0 = slot_uop_mem_size; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_mem_size_0 = slot_uop_mem_size; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_mem_signed; // @[issue-slot.scala:102:25]
assign io_out_uop_mem_signed_0 = slot_uop_mem_signed; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_mem_signed_0 = slot_uop_mem_signed; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_fence; // @[issue-slot.scala:102:25]
assign io_out_uop_is_fence_0 = slot_uop_is_fence; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_fence_0 = slot_uop_is_fence; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_fencei; // @[issue-slot.scala:102:25]
assign io_out_uop_is_fencei_0 = slot_uop_is_fencei; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_fencei_0 = slot_uop_is_fencei; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_amo; // @[issue-slot.scala:102:25]
assign io_out_uop_is_amo_0 = slot_uop_is_amo; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_amo_0 = slot_uop_is_amo; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_uses_ldq; // @[issue-slot.scala:102:25]
assign io_out_uop_uses_ldq_0 = slot_uop_uses_ldq; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_uses_ldq_0 = slot_uop_uses_ldq; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_uses_stq; // @[issue-slot.scala:102:25]
assign io_out_uop_uses_stq_0 = slot_uop_uses_stq; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_uses_stq_0 = slot_uop_uses_stq; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_sys_pc2epc; // @[issue-slot.scala:102:25]
assign io_out_uop_is_sys_pc2epc_0 = slot_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_sys_pc2epc_0 = slot_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_is_unique; // @[issue-slot.scala:102:25]
assign io_out_uop_is_unique_0 = slot_uop_is_unique; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_is_unique_0 = slot_uop_is_unique; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_flush_on_commit; // @[issue-slot.scala:102:25]
assign io_out_uop_flush_on_commit_0 = slot_uop_flush_on_commit; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_flush_on_commit_0 = slot_uop_flush_on_commit; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_ldst_is_rs1; // @[issue-slot.scala:102:25]
assign io_out_uop_ldst_is_rs1_0 = slot_uop_ldst_is_rs1; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ldst_is_rs1_0 = slot_uop_ldst_is_rs1; // @[issue-slot.scala:69:7, :102:25]
reg [5:0] slot_uop_ldst; // @[issue-slot.scala:102:25]
assign io_out_uop_ldst_0 = slot_uop_ldst; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ldst_0 = slot_uop_ldst; // @[issue-slot.scala:69:7, :102:25]
reg [5:0] slot_uop_lrs1; // @[issue-slot.scala:102:25]
assign io_out_uop_lrs1_0 = slot_uop_lrs1; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_lrs1_0 = slot_uop_lrs1; // @[issue-slot.scala:69:7, :102:25]
reg [5:0] slot_uop_lrs2; // @[issue-slot.scala:102:25]
assign io_out_uop_lrs2_0 = slot_uop_lrs2; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_lrs2_0 = slot_uop_lrs2; // @[issue-slot.scala:69:7, :102:25]
reg [5:0] slot_uop_lrs3; // @[issue-slot.scala:102:25]
assign io_out_uop_lrs3_0 = slot_uop_lrs3; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_lrs3_0 = slot_uop_lrs3; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_ldst_val; // @[issue-slot.scala:102:25]
assign io_out_uop_ldst_val_0 = slot_uop_ldst_val; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_ldst_val_0 = slot_uop_ldst_val; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_dst_rtype; // @[issue-slot.scala:102:25]
assign io_out_uop_dst_rtype_0 = slot_uop_dst_rtype; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_dst_rtype_0 = slot_uop_dst_rtype; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_lrs1_rtype; // @[issue-slot.scala:102:25]
reg [1:0] slot_uop_lrs2_rtype; // @[issue-slot.scala:102:25]
reg slot_uop_frs3_en; // @[issue-slot.scala:102:25]
assign io_out_uop_frs3_en_0 = slot_uop_frs3_en; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_frs3_en_0 = slot_uop_frs3_en; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_fp_val; // @[issue-slot.scala:102:25]
assign io_out_uop_fp_val_0 = slot_uop_fp_val; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_fp_val_0 = slot_uop_fp_val; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_fp_single; // @[issue-slot.scala:102:25]
assign io_out_uop_fp_single_0 = slot_uop_fp_single; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_fp_single_0 = slot_uop_fp_single; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_xcpt_pf_if; // @[issue-slot.scala:102:25]
assign io_out_uop_xcpt_pf_if_0 = slot_uop_xcpt_pf_if; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_xcpt_pf_if_0 = slot_uop_xcpt_pf_if; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_xcpt_ae_if; // @[issue-slot.scala:102:25]
assign io_out_uop_xcpt_ae_if_0 = slot_uop_xcpt_ae_if; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_xcpt_ae_if_0 = slot_uop_xcpt_ae_if; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_xcpt_ma_if; // @[issue-slot.scala:102:25]
assign io_out_uop_xcpt_ma_if_0 = slot_uop_xcpt_ma_if; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_xcpt_ma_if_0 = slot_uop_xcpt_ma_if; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_bp_debug_if; // @[issue-slot.scala:102:25]
assign io_out_uop_bp_debug_if_0 = slot_uop_bp_debug_if; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_bp_debug_if_0 = slot_uop_bp_debug_if; // @[issue-slot.scala:69:7, :102:25]
reg slot_uop_bp_xcpt_if; // @[issue-slot.scala:102:25]
assign io_out_uop_bp_xcpt_if_0 = slot_uop_bp_xcpt_if; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_bp_xcpt_if_0 = slot_uop_bp_xcpt_if; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_debug_fsrc; // @[issue-slot.scala:102:25]
assign io_out_uop_debug_fsrc_0 = slot_uop_debug_fsrc; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_debug_fsrc_0 = slot_uop_debug_fsrc; // @[issue-slot.scala:69:7, :102:25]
reg [1:0] slot_uop_debug_tsrc; // @[issue-slot.scala:102:25]
assign io_out_uop_debug_tsrc_0 = slot_uop_debug_tsrc; // @[issue-slot.scala:69:7, :102:25]
assign io_uop_debug_tsrc_0 = slot_uop_debug_tsrc; // @[issue-slot.scala:69:7, :102:25]
wire [6:0] next_uop_uopc = io_in_uop_valid_0 ? io_in_uop_bits_uopc_0 : slot_uop_uopc; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [31:0] next_uop_inst = io_in_uop_valid_0 ? io_in_uop_bits_inst_0 : slot_uop_inst; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [31:0] next_uop_debug_inst = io_in_uop_valid_0 ? io_in_uop_bits_debug_inst_0 : slot_uop_debug_inst; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_rvc = io_in_uop_valid_0 ? io_in_uop_bits_is_rvc_0 : slot_uop_is_rvc; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [39:0] next_uop_debug_pc = io_in_uop_valid_0 ? io_in_uop_bits_debug_pc_0 : slot_uop_debug_pc; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [2:0] next_uop_iq_type = io_in_uop_valid_0 ? io_in_uop_bits_iq_type_0 : slot_uop_iq_type; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [9:0] next_uop_fu_code = io_in_uop_valid_0 ? io_in_uop_bits_fu_code_0 : slot_uop_fu_code; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [3:0] next_uop_ctrl_br_type = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_br_type_0 : slot_uop_ctrl_br_type; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_ctrl_op1_sel = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_op1_sel_0 : slot_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [2:0] next_uop_ctrl_op2_sel = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_op2_sel_0 : slot_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [2:0] next_uop_ctrl_imm_sel = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_imm_sel_0 : slot_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [4:0] next_uop_ctrl_op_fcn = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_op_fcn_0 : slot_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_ctrl_fcn_dw = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_fcn_dw_0 : slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [2:0] next_uop_ctrl_csr_cmd = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_csr_cmd_0 : slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_ctrl_is_load = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_is_load_0 : slot_uop_ctrl_is_load; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_ctrl_is_sta = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_is_sta_0 : slot_uop_ctrl_is_sta; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_ctrl_is_std = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_is_std_0 : slot_uop_ctrl_is_std; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_iw_state = io_in_uop_valid_0 ? io_in_uop_bits_iw_state_0 : slot_uop_iw_state; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_iw_p1_poisoned = io_in_uop_valid_0 ? io_in_uop_bits_iw_p1_poisoned_0 : slot_uop_iw_p1_poisoned; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_iw_p2_poisoned = io_in_uop_valid_0 ? io_in_uop_bits_iw_p2_poisoned_0 : slot_uop_iw_p2_poisoned; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_br = io_in_uop_valid_0 ? io_in_uop_bits_is_br_0 : slot_uop_is_br; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_jalr = io_in_uop_valid_0 ? io_in_uop_bits_is_jalr_0 : slot_uop_is_jalr; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_jal = io_in_uop_valid_0 ? io_in_uop_bits_is_jal_0 : slot_uop_is_jal; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_sfb = io_in_uop_valid_0 ? io_in_uop_bits_is_sfb_0 : slot_uop_is_sfb; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [15:0] next_uop_br_mask = io_in_uop_valid_0 ? io_in_uop_bits_br_mask_0 : slot_uop_br_mask; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [3:0] next_uop_br_tag = io_in_uop_valid_0 ? io_in_uop_bits_br_tag_0 : slot_uop_br_tag; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [4:0] next_uop_ftq_idx = io_in_uop_valid_0 ? io_in_uop_bits_ftq_idx_0 : slot_uop_ftq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_edge_inst = io_in_uop_valid_0 ? io_in_uop_bits_edge_inst_0 : slot_uop_edge_inst; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [5:0] next_uop_pc_lob = io_in_uop_valid_0 ? io_in_uop_bits_pc_lob_0 : slot_uop_pc_lob; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_taken = io_in_uop_valid_0 ? io_in_uop_bits_taken_0 : slot_uop_taken; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [19:0] next_uop_imm_packed = io_in_uop_valid_0 ? io_in_uop_bits_imm_packed_0 : slot_uop_imm_packed; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [11:0] next_uop_csr_addr = io_in_uop_valid_0 ? io_in_uop_bits_csr_addr_0 : slot_uop_csr_addr; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [6:0] next_uop_rob_idx = io_in_uop_valid_0 ? io_in_uop_bits_rob_idx_0 : slot_uop_rob_idx; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [4:0] next_uop_ldq_idx = io_in_uop_valid_0 ? io_in_uop_bits_ldq_idx_0 : slot_uop_ldq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [4:0] next_uop_stq_idx = io_in_uop_valid_0 ? io_in_uop_bits_stq_idx_0 : slot_uop_stq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_rxq_idx = io_in_uop_valid_0 ? io_in_uop_bits_rxq_idx_0 : slot_uop_rxq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [6:0] next_uop_pdst = io_in_uop_valid_0 ? io_in_uop_bits_pdst_0 : slot_uop_pdst; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [6:0] next_uop_prs1 = io_in_uop_valid_0 ? io_in_uop_bits_prs1_0 : slot_uop_prs1; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [6:0] next_uop_prs2 = io_in_uop_valid_0 ? io_in_uop_bits_prs2_0 : slot_uop_prs2; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [6:0] next_uop_prs3 = io_in_uop_valid_0 ? io_in_uop_bits_prs3_0 : slot_uop_prs3; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [4:0] next_uop_ppred = io_in_uop_valid_0 ? io_in_uop_bits_ppred_0 : slot_uop_ppred; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_prs1_busy = io_in_uop_valid_0 ? io_in_uop_bits_prs1_busy_0 : slot_uop_prs1_busy; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_prs2_busy = io_in_uop_valid_0 ? io_in_uop_bits_prs2_busy_0 : slot_uop_prs2_busy; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_prs3_busy = io_in_uop_valid_0 ? io_in_uop_bits_prs3_busy_0 : slot_uop_prs3_busy; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_ppred_busy = io_in_uop_valid_0 ? io_in_uop_bits_ppred_busy_0 : slot_uop_ppred_busy; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [6:0] next_uop_stale_pdst = io_in_uop_valid_0 ? io_in_uop_bits_stale_pdst_0 : slot_uop_stale_pdst; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_exception = io_in_uop_valid_0 ? io_in_uop_bits_exception_0 : slot_uop_exception; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [63:0] next_uop_exc_cause = io_in_uop_valid_0 ? io_in_uop_bits_exc_cause_0 : slot_uop_exc_cause; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_bypassable = io_in_uop_valid_0 ? io_in_uop_bits_bypassable_0 : slot_uop_bypassable; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [4:0] next_uop_mem_cmd = io_in_uop_valid_0 ? io_in_uop_bits_mem_cmd_0 : slot_uop_mem_cmd; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_mem_size = io_in_uop_valid_0 ? io_in_uop_bits_mem_size_0 : slot_uop_mem_size; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_mem_signed = io_in_uop_valid_0 ? io_in_uop_bits_mem_signed_0 : slot_uop_mem_signed; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_fence = io_in_uop_valid_0 ? io_in_uop_bits_is_fence_0 : slot_uop_is_fence; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_fencei = io_in_uop_valid_0 ? io_in_uop_bits_is_fencei_0 : slot_uop_is_fencei; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_amo = io_in_uop_valid_0 ? io_in_uop_bits_is_amo_0 : slot_uop_is_amo; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_uses_ldq = io_in_uop_valid_0 ? io_in_uop_bits_uses_ldq_0 : slot_uop_uses_ldq; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_uses_stq = io_in_uop_valid_0 ? io_in_uop_bits_uses_stq_0 : slot_uop_uses_stq; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_sys_pc2epc = io_in_uop_valid_0 ? io_in_uop_bits_is_sys_pc2epc_0 : slot_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_is_unique = io_in_uop_valid_0 ? io_in_uop_bits_is_unique_0 : slot_uop_is_unique; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_flush_on_commit = io_in_uop_valid_0 ? io_in_uop_bits_flush_on_commit_0 : slot_uop_flush_on_commit; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_ldst_is_rs1 = io_in_uop_valid_0 ? io_in_uop_bits_ldst_is_rs1_0 : slot_uop_ldst_is_rs1; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [5:0] next_uop_ldst = io_in_uop_valid_0 ? io_in_uop_bits_ldst_0 : slot_uop_ldst; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [5:0] next_uop_lrs1 = io_in_uop_valid_0 ? io_in_uop_bits_lrs1_0 : slot_uop_lrs1; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [5:0] next_uop_lrs2 = io_in_uop_valid_0 ? io_in_uop_bits_lrs2_0 : slot_uop_lrs2; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [5:0] next_uop_lrs3 = io_in_uop_valid_0 ? io_in_uop_bits_lrs3_0 : slot_uop_lrs3; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_ldst_val = io_in_uop_valid_0 ? io_in_uop_bits_ldst_val_0 : slot_uop_ldst_val; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_dst_rtype = io_in_uop_valid_0 ? io_in_uop_bits_dst_rtype_0 : slot_uop_dst_rtype; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_lrs1_rtype = io_in_uop_valid_0 ? io_in_uop_bits_lrs1_rtype_0 : slot_uop_lrs1_rtype; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_lrs2_rtype = io_in_uop_valid_0 ? io_in_uop_bits_lrs2_rtype_0 : slot_uop_lrs2_rtype; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_frs3_en = io_in_uop_valid_0 ? io_in_uop_bits_frs3_en_0 : slot_uop_frs3_en; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_fp_val = io_in_uop_valid_0 ? io_in_uop_bits_fp_val_0 : slot_uop_fp_val; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_fp_single = io_in_uop_valid_0 ? io_in_uop_bits_fp_single_0 : slot_uop_fp_single; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_xcpt_pf_if = io_in_uop_valid_0 ? io_in_uop_bits_xcpt_pf_if_0 : slot_uop_xcpt_pf_if; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_xcpt_ae_if = io_in_uop_valid_0 ? io_in_uop_bits_xcpt_ae_if_0 : slot_uop_xcpt_ae_if; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_xcpt_ma_if = io_in_uop_valid_0 ? io_in_uop_bits_xcpt_ma_if_0 : slot_uop_xcpt_ma_if; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_bp_debug_if = io_in_uop_valid_0 ? io_in_uop_bits_bp_debug_if_0 : slot_uop_bp_debug_if; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire next_uop_bp_xcpt_if = io_in_uop_valid_0 ? io_in_uop_bits_bp_xcpt_if_0 : slot_uop_bp_xcpt_if; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_debug_fsrc = io_in_uop_valid_0 ? io_in_uop_bits_debug_fsrc_0 : slot_uop_debug_fsrc; // @[issue-slot.scala:69:7, :102:25, :103:21]
wire [1:0] next_uop_debug_tsrc = io_in_uop_valid_0 ? io_in_uop_bits_debug_tsrc_0 : slot_uop_debug_tsrc; // @[issue-slot.scala:69:7, :102:25, :103:21]
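  // Editorial annotation (added for clarity, not emitted by the Chisel generator):
  // the terms below realize the issue-slot grant logic of issue-slot.scala.
  // _T_7 is the "slot fully issued" case (granted in state 2'h1, or granted in the
  // two-uop store state 2'h2 with address, data, and predicate operands all ready);
  // _T_12 is a grant while in state 2'h2, and _T_14 retracts that grant when a
  // speculative load wakeup missed and a poisoned operand was involved. On a partial
  // grant of the two-uop store, the surviving half is rewritten below: uopc becomes
  // 7'h3 and the consumed operand's register type becomes 2'h2 (in BOOM's encodings
  // these most likely correspond to uopSTD and RT_X, respectively).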
wire _T_11 = state == 2'h2; // @[issue-slot.scala:86:22, :134:25]
wire _T_7 = io_grant_0 & state == 2'h1 | io_grant_0 & _T_11 & p1 & p2 & ppred; // @[issue-slot.scala:69:7, :86:22, :87:22, :88:22, :90:22, :133:{26,36,52}, :134:{15,25,40,46,52}]
wire _T_12 = io_grant_0 & _T_11; // @[issue-slot.scala:69:7, :134:25, :139:25]
wire _T_14 = io_ldspec_miss_0 & (p1_poisoned | p2_poisoned); // @[issue-slot.scala:69:7, :95:28, :96:28, :140:{28,44}]
wire _GEN = _T_12 & ~_T_14; // @[issue-slot.scala:126:14, :139:{25,51}, :140:{11,28,62}, :141:18]
wire _GEN_0 = io_kill_0 | _T_7; // @[issue-slot.scala:69:7, :102:25, :131:18, :133:52, :134:63, :139:51]
wire _GEN_1 = _GEN_0 | ~(_T_12 & ~_T_14 & p1); // @[issue-slot.scala:87:22, :102:25, :131:18, :134:63, :139:{25,51}, :140:{11,28,62}, :142:17, :143:23]
assign next_uopc = _GEN_1 ? slot_uop_uopc : 7'h3; // @[issue-slot.scala:82:29, :102:25, :131:18, :134:63, :139:51]
assign next_lrs1_rtype = _GEN_1 ? slot_uop_lrs1_rtype : 2'h2; // @[issue-slot.scala:83:29, :102:25, :131:18, :134:63, :139:51]
wire _GEN_2 = _GEN_0 | ~_GEN | p1; // @[issue-slot.scala:87:22, :102:25, :126:14, :131:18, :134:63, :139:51, :140:62, :141:18, :142:17]
assign next_lrs2_rtype = _GEN_2 ? slot_uop_lrs2_rtype : 2'h2; // @[issue-slot.scala:84:29, :102:25, :131:18, :134:63, :139:51, :140:62, :142:17]
wire _p1_T = ~io_in_uop_bits_prs1_busy_0; // @[issue-slot.scala:69:7, :169:11]
wire _p2_T = ~io_in_uop_bits_prs2_busy_0; // @[issue-slot.scala:69:7, :170:11]
wire _p3_T = ~io_in_uop_bits_prs3_busy_0; // @[issue-slot.scala:69:7, :171:11]
wire _ppred_T = ~io_in_uop_bits_ppred_busy_0; // @[issue-slot.scala:69:7, :172:14]
wire _T_22 = io_ldspec_miss_0 & next_p1_poisoned; // @[issue-slot.scala:69:7, :99:29, :175:24]
wire _T_27 = io_ldspec_miss_0 & next_p2_poisoned; // @[issue-slot.scala:69:7, :100:29, :179:24]
wire _T_85 = io_spec_ld_wakeup_0_valid_0 & io_spec_ld_wakeup_0_bits_0 == next_uop_prs1 & next_uop_lrs1_rtype == 2'h0; // @[issue-slot.scala:69:7, :103:21, :209:38, :210:{33,51}, :211:27]
wire _T_93 = io_spec_ld_wakeup_0_valid_0 & io_spec_ld_wakeup_0_bits_0 == next_uop_prs2 & next_uop_lrs2_rtype == 2'h0; // @[issue-slot.scala:69:7, :103:21, :216:38, :217:{33,51}, :218:27]
Generate the Verilog code corresponding to the following Chisel files.
File util.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v4.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v4.common.{MicroOp}
import boom.v4.exu.{BrUpdateInfo}
/**
 * Object to XOR fold an input register of fullLength into a compressedLength.
*/
object Fold
{
def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
val clen = compressedLength
val hlen = fullLength
if (hlen <= clen) {
input
} else {
var res = 0.U(clen.W)
var remaining = input.asUInt
for (i <- 0 to hlen-1 by clen) {
val len = if (i + clen > hlen ) (hlen - i) else clen
require(len > 0)
res = res(clen-1,0) ^ remaining(len-1,0)
remaining = remaining >> len.U
}
res
}
}
}
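// Illustrative note (editorial, not part of the original BOOM source): with
// compressedLength = 4 and fullLength = 16 the input is consumed in 4-bit chunks
// from the LSB upward and XOR-folded together, e.g. Fold(0x1234.U(16.W), 4, 16)
// reduces to 0x4 ^ 0x3 ^ 0x2 ^ 0x1 = 0x4.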
/**
* Object to check if MicroOp was killed due to a branch mispredict.
* Uses "Fast" branch masks
*/
object IsKilledByBranch
{
def apply(brupdate: BrUpdateInfo, flush: Bool, uop: MicroOp): Bool = {
return apply(brupdate, flush, uop.br_mask)
}
def apply(brupdate: BrUpdateInfo, flush: Bool, uop_mask: UInt): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop_mask) || flush
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: T): Bool = {
return apply(brupdate, flush, bundle.uop)
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Bool = {
return apply(brupdate, flush, bundle.bits)
}
}
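// Illustrative note (editorial): every in-flight branch owns one bit of the
// br_mask carried by younger uops; brupdate.b1.mispredict_mask has the bit of a
// mispredicted branch set, so a uop is killed exactly when the two masks share a
// set bit (or a pipeline flush is requested).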
/**
* Object to return new MicroOp with a new BR mask given a MicroOp mask
* and old BR mask.
*/
object GetNewUopAndBrMask
{
def apply(uop: MicroOp, brupdate: BrUpdateInfo)
(implicit p: Parameters): MicroOp = {
val newuop = WireInit(uop)
newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
newuop
}
}
/**
* Object to return a BR mask given a MicroOp mask and old BR mask.
*/
object GetNewBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = {
return uop.br_mask & ~brupdate.b1.resolve_mask
}
def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = {
return br_mask & ~brupdate.b1.resolve_mask
}
}
object UpdateBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.br_mask := GetNewBrMask(brupdate, uop)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
val out = WireInit(bundle)
out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Valid[T] = {
val out = WireInit(bundle)
out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
out.valid := bundle.valid && !IsKilledByBranch(brupdate, flush, bundle.bits.uop.br_mask)
out
}
}
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
def apply(reg_val: UInt, new_bit: Bool): UInt = {
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
val carry = csr(clen-1)
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapAdd
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, amt: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + amt)(log2Ceil(n)-1,0)
} else {
val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
Mux(sum >= n.U,
sum - n.U,
sum)
}
}
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapSub
{
// "n" is the number of increments, so we wrap to n-1.
def apply(value: UInt, amt: Int, n: Int): UInt = {
if (isPow2(n)) {
(value - amt.U)(log2Ceil(n)-1,0)
} else {
val v = Cat(0.U(1.W), value)
val b = Cat(0.U(1.W), amt.U)
Mux(value >= amt.U,
value - amt.U,
n.U - amt.U + value)
}
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === (n-1).U)
Mux(wrap, 0.U, value + 1.U)
}
}
}
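// Illustrative sketch (editorial, not part of the original BOOM source): a
// minimal module showing the intended use of WrapInc as a circular pointer over
// a possibly non-power-of-two number of entries. The module name is hypothetical.
class WrapIncExample(entries: Int) extends Module {
  require(entries > 1)
  val io = IO(new Bundle {
    val advance = Input(Bool())
    val ptr     = Output(UInt(log2Ceil(entries).W))
  })
  // The pointer wraps from entries-1 back to 0; for power-of-two sizes WrapInc
  // reduces to a plain truncated add.
  val ptr = RegInit(0.U(log2Ceil(entries).W))
  when (io.advance) { ptr := WrapInc(ptr, entries) }
  io.ptr := ptr
}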
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value - 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === 0.U)
Mux(wrap, (n-1).U, value - 1.U)
}
}
}
/**
* Object to mask off lower bits of a PC to align to a "b"
* Byte boundary.
*/
object AlignPCToBoundary
{
def apply(pc: UInt, b: Int): UInt = {
    // Invert rather than AND with ~((b-1).U): in the scenario where pc is wider
    // than (b-1).U, a plain mask would clear all pc bits above the width of (b-1).
~(~pc | (b-1).U)
}
}
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
def apply(signal: UInt): UInt = {
val w = signal.getWidth
val out = Cat(signal(w-2,0), signal(w-1))
return out
}
}
/**
* Object to sext a value to a particular length.
*/
object Sext
{
def apply(x: UInt, length: Int): UInt = {
if (x.getWidth == length) return x
else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x)
}
}
/**
 * Object to translate from BOOM's special "packed immediate" to a 32b signed immediate.
* Asking for U-type gives it shifted up 12 bits.
*/
object ImmGen
{
import boom.v4.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U, IS_N}
def apply(i: UInt, isel: UInt): UInt = {
val ip = Mux(isel === IS_N, 0.U(LONGEST_IMM_SZ.W), i)
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0)
}
}
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
 * for a debug assert (nowhere else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
val is_br = (inst(6,0) === "b1100011".U)
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
* Object to return the lowest bit position after the head.
*/
object AgePriorityEncoder
{
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
}
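// Illustrative note (editorial): the requestor vector is duplicated, with the
// copy below `head` masked off, so the encoder first searches indices >= head and
// only then wraps. E.g. in = (1,0,1,0) with head = 2 returns 2, while
// in = (1,0,0,0) with head = 2 wraps around and returns 0.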
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
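// Illustrative note (editorial): the triple XOR handles wrap-around without
// widening. E.g. with head = 5, IsOlder(6.U, 2.U, 5.U) is true -- index 6 was
// allocated before index 2, which lies past the wrap point.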
object IsYoungerMask
{
def apply(i: UInt, head: UInt, n: Integer): UInt = {
val hi_mask = ~MaskLower(UIntToOH(i)(n-1,0))
val lo_mask = ~MaskUpper(UIntToOH(head)(n-1,0))
Mux(i < head, hi_mask & lo_mask, hi_mask | lo_mask)(n-1,0)
}
}
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => in >> i.U).reduce(_|_)
}
}
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_)
}
}
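// Illustrative note (editorial): for in = b00100, MaskLower yields b00111 and
// MaskUpper yields b11100; both degenerate to all-zeros when no bit is set.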
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
val n = in(0).size
VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
}
}
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
var mask = in
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
}
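// Illustrative note (editorial): SelectFirstN("b1011".U, 2) yields the one-hot
// selects (b0001, b0010) -- the lowest set bit first, then the next-lowest after
// that bit has been masked off.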
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
io.out <> io.in
} else {
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
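// Illustrative sketch (editorial, not part of the original BOOM source): a
// Compactor that funnels whichever of four byte streams are valid onto the first
// two outputs, in order. The wrapper name is hypothetical.
class CompactorExample extends Module {
  val io = IO(new Bundle {
    val in  = Vec(4, Flipped(DecoupledIO(UInt(8.W))))
    val out = Vec(2, DecoupledIO(UInt(8.W)))
  })
  val compactor = Module(new Compactor(4, 2, UInt(8.W)))
  compactor.io.in <> io.in
  io.out <> compactor.io.out
}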
/**
* Create a queue that can be killed with a branch kill signal.
* Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq).
*/
class BranchKillableQueue[T <: boom.v4.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v4.common.MicroOp => Bool = u => true.B, fastDeq: Boolean = false)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
if (fastDeq && entries > 1) {
// Pipeline dequeue selection so the mux gets an entire cycle
val main = Module(new BranchKillableQueue(gen, entries-1, flush_fn, false))
val out_reg = Reg(gen)
val out_valid = RegInit(false.B)
val out_uop = Reg(new MicroOp)
main.io.enq <> io.enq
main.io.brupdate := io.brupdate
main.io.flush := io.flush
io.empty := main.io.empty && !out_valid
io.count := main.io.count + out_valid
io.deq.valid := out_valid
io.deq.bits := out_reg
io.deq.bits.uop := out_uop
out_uop := UpdateBrMask(io.brupdate, out_uop)
out_valid := out_valid && !IsKilledByBranch(io.brupdate, false.B, out_uop) && !(io.flush && flush_fn(out_uop))
main.io.deq.ready := false.B
when (io.deq.fire || !out_valid) {
out_valid := main.io.deq.valid && !IsKilledByBranch(io.brupdate, false.B, main.io.deq.bits.uop) && !(io.flush && flush_fn(main.io.deq.bits.uop))
out_reg := main.io.deq.bits
out_uop := UpdateBrMask(io.brupdate, main.io.deq.bits.uop)
main.io.deq.ready := true.B
}
} else {
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireInit(io.enq.fire && !IsKilledByBranch(io.brupdate, false.B, io.enq.bits.uop) && !(io.flush && flush_fn(io.enq.bits.uop)))
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, false.B, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value)
io.deq.bits := out
val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
}
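// Illustrative note (editorial, names hypothetical): a typical instantiation from
// inside a BoomModule looks like
//   val q = Module(new BranchKillableQueue(new SomeUopBundle, entries = 8,
//                                          flush_fn = u => u.uses_ldq))
//   q.io.brupdate := io.brupdate
//   q.io.flush    := io.flush
// Entries are dropped in place when their br_mask matches a mispredicted branch,
// so io.deq.valid can deassert without io.deq ever firing.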
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
/**
* Take in a Chisel Bool and convert it into a Str
* based on the Chars given
*
* @param c_bool Chisel Bool
* @param trueChar Scala Char if bool is true
* @param falseChar Scala Char if bool is false
* @return UInt ASCII Char for "trueChar" or "falseChar"
*/
def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
Mux(c_bool, Str(trueChar), Str(falseChar))
}
}
object CfiTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param cfi_type specific cfi type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(cfi_type: UInt) = {
val strings = Seq("----", "BR ", "JAL ", "JALR")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(cfi_type)
}
}
object BpdTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(bpd_type)
}
}
object RobTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(rob_type)
}
}
object XRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(xreg)
}
}
object FPRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(fpreg)
}
}
object BoomCoreStringPrefix
{
/**
* Add prefix to BOOM strings (currently only adds the hartId)
*
* @param strs list of strings
* @return String combining the list with the prefix per line
*/
def apply(strs: String*)(implicit p: Parameters) = {
val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
strs.map(str => prefix + str + "\n").mkString("")
}
}
class BranchKillablePipeline[T <: boom.v4.common.HasBoomUOP](gen: T, stages: Int)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val req = Input(Valid(gen))
val flush = Input(Bool())
val brupdate = Input(new BrUpdateInfo)
val resp = Output(Vec(stages, Valid(gen)))
})
require(stages > 0)
val uops = Reg(Vec(stages, Valid(gen)))
uops(0).valid := io.req.valid && !IsKilledByBranch(io.brupdate, io.flush, io.req.bits)
uops(0).bits := UpdateBrMask(io.brupdate, io.req.bits)
for (i <- 1 until stages) {
uops(i).valid := uops(i-1).valid && !IsKilledByBranch(io.brupdate, io.flush, uops(i-1).bits)
uops(i).bits := UpdateBrMask(io.brupdate, uops(i-1).bits)
}
for (i <- 0 until stages) { when (reset.asBool) { uops(i).valid := false.B } }
io.resp := uops
}
File tage.scala:
package boom.v4.ifu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import boom.v4.common._
import boom.v4.util.{BoomCoreStringPrefix, MaskLower, WrapInc}
import scala.math.min
class TageResp extends Bundle {
val ctr = UInt(3.W)
val u = UInt(2.W)
}
class TageTable(val nRows: Int, val tagSz: Int, val histLength: Int, val uBitPeriod: Int, val singlePorted: Boolean)
(implicit p: Parameters) extends BoomModule()(p)
with HasBoomFrontendParameters
{
require(histLength <= globalHistoryLength)
val nWrBypassEntries = 2
val io = IO( new Bundle {
val f1_req_valid = Input(Bool())
val f1_req_pc = Input(UInt(vaddrBitsExtended.W))
val f1_req_ghist = Input(UInt(globalHistoryLength.W))
val f2_resp = Output(Vec(bankWidth, Valid(new TageResp)))
val update_mask = Input(Vec(bankWidth, Bool()))
val update_taken = Input(Vec(bankWidth, Bool()))
val update_alloc = Input(Vec(bankWidth, Bool()))
val update_old_ctr = Input(Vec(bankWidth, UInt(3.W)))
val update_pc = Input(UInt())
val update_hist = Input(UInt())
val update_u_mask = Input(Vec(bankWidth, Bool()))
val update_u = Input(Vec(bankWidth, UInt(2.W)))
})
def compute_folded_hist(hist: UInt, l: Int) = {
val nChunks = (histLength + l - 1) / l
val hist_chunks = (0 until nChunks) map {i =>
hist(min((i+1)*l, histLength)-1, i*l)
}
hist_chunks.reduce(_^_)
}
def compute_tag_and_hash(unhashed_idx: UInt, hist: UInt) = {
val idx_history = compute_folded_hist(hist, log2Ceil(nRows))
val idx = (unhashed_idx ^ idx_history)(log2Ceil(nRows)-1,0)
val tag_history = compute_folded_hist(hist, tagSz)
val tag = ((unhashed_idx >> log2Ceil(nRows)) ^ tag_history)(tagSz-1,0)
(idx, tag)
}
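  // Illustrative note (editorial): compute_folded_hist XOR-folds the global
  // history down to l bits (e.g. histLength = 8, l = 4 gives hist(3,0) ^ hist(7,4));
  // compute_tag_and_hash then mixes the fetch index with a log2(nRows)-bit fold for
  // the row index and the shifted index with a tagSz-bit fold for the tag.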
def inc_ctr(ctr: UInt, taken: Bool): UInt = {
Mux(!taken, Mux(ctr === 0.U, 0.U, ctr - 1.U),
Mux(ctr === 7.U, 7.U, ctr + 1.U))
}
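  // Illustrative note (editorial): ctr is a 3-bit saturating counter -- values 4..7
  // predict taken, 0..3 predict not-taken; inc_ctr saturates at 0 and at 7.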
val doing_reset = RegInit(true.B)
val reset_idx = RegInit(0.U(log2Ceil(nRows).W))
reset_idx := reset_idx + doing_reset
when (reset_idx === (nRows-1).U) { doing_reset := false.B }
class TageEntry extends Bundle {
val valid = Bool() // TODO: Remove this valid bit
val tag = UInt(tagSz.W)
val ctr = UInt(3.W)
}
val tageEntrySz = 1 + tagSz + 3
val (s1_hashed_idx, s1_tag) = compute_tag_and_hash(fetchIdx(io.f1_req_pc), io.f1_req_ghist)
val us = SyncReadMem(nRows, Vec(bankWidth*2, Bool()))
val table = SyncReadMem(nRows, Vec(bankWidth, UInt(tageEntrySz.W)))
us.suggestName(s"tage_u_${histLength}")
table.suggestName(s"tage_table_${histLength}")
val mems = Seq((f"tage_l$histLength", nRows, bankWidth * tageEntrySz))
val s2_tag = RegNext(s1_tag)
val s2_req_rtage = Wire(Vec(bankWidth, new TageEntry))
val s2_req_rus = Wire(Vec(bankWidth*2, Bool()))
val s2_req_rhits = VecInit(s2_req_rtage.map(e => e.valid && e.tag === s2_tag && !doing_reset))
for (w <- 0 until bankWidth) {
// This bit indicates the TAGE table matched here
io.f2_resp(w).valid := s2_req_rhits(w)
io.f2_resp(w).bits.u := Cat(s2_req_rus(w*2+1), s2_req_rus(w*2))
io.f2_resp(w).bits.ctr := s2_req_rtage(w).ctr
}
val clear_u_ctr = RegInit(0.U((log2Ceil(uBitPeriod) + log2Ceil(nRows) + 1).W))
when (doing_reset) { clear_u_ctr := 1.U } .otherwise { clear_u_ctr := clear_u_ctr + 1.U }
val doing_clear_u = clear_u_ctr(log2Ceil(uBitPeriod)-1,0) === 0.U
val clear_u_hi = clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 1.U
val clear_u_lo = clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 0.U
val clear_u_idx = clear_u_ctr >> log2Ceil(uBitPeriod)
val clear_u_mask = VecInit((0 until bankWidth*2) map { i => if (i % 2 == 0) clear_u_lo else clear_u_hi }).asUInt
val (update_idx, update_tag) = compute_tag_and_hash(fetchIdx(io.update_pc), io.update_hist)
val update_wdata = Wire(Vec(bankWidth, new TageEntry))
val wen = WireInit(doing_reset || io.update_mask.reduce(_||_))
val rdata = if (singlePorted) table.read(s1_hashed_idx, !wen && io.f1_req_valid) else table.read(s1_hashed_idx, io.f1_req_valid)
when (RegNext(wen) && singlePorted.B) {
s2_req_rtage := 0.U.asTypeOf(Vec(bankWidth, new TageEntry))
} .otherwise {
s2_req_rtage := VecInit(rdata.map(_.asTypeOf(new TageEntry)))
}
when (wen) {
val widx = Mux(doing_reset, reset_idx, update_idx)
val wdata = Mux(doing_reset, VecInit(Seq.fill(bankWidth) { 0.U(tageEntrySz.W) }), VecInit(update_wdata.map(_.asUInt)))
val wmask = Mux(doing_reset, ~(0.U(bankWidth.W)), io.update_mask.asUInt)
table.write(widx, wdata, wmask.asBools)
}
val update_u_mask = VecInit((0 until bankWidth*2) map {i => io.update_u_mask(i / 2)})
val update_u_wen = WireInit(doing_reset || doing_clear_u || update_u_mask.reduce(_||_))
val u_rdata = if (singlePorted) {
us.read(s1_hashed_idx, !update_u_wen && io.f1_req_valid)
} else {
us.read(s1_hashed_idx, io.f1_req_valid)
}
s2_req_rus := u_rdata
when (update_u_wen) {
val widx = Mux(doing_reset, reset_idx, Mux(doing_clear_u, clear_u_idx, update_idx))
val wdata = Mux(doing_reset || doing_clear_u, VecInit(0.U((bankWidth*2).W).asBools), VecInit(io.update_u.asUInt.asBools))
val wmask = Mux(doing_reset, ~(0.U((bankWidth*2).W)), Mux(doing_clear_u, clear_u_mask, update_u_mask.asUInt))
us.write(widx, wdata, wmask.asBools)
}
val wrbypass_tags = Reg(Vec(nWrBypassEntries, UInt(tagSz.W)))
val wrbypass_idxs = Reg(Vec(nWrBypassEntries, UInt(log2Ceil(nRows).W)))
val wrbypass = Reg(Vec(nWrBypassEntries, Vec(bankWidth, UInt(3.W))))
val wrbypass_enq_idx = RegInit(0.U(log2Ceil(nWrBypassEntries).W))
val wrbypass_hits = VecInit((0 until nWrBypassEntries) map { i =>
!doing_reset &&
wrbypass_tags(i) === update_tag &&
wrbypass_idxs(i) === update_idx
})
val wrbypass_hit = wrbypass_hits.reduce(_||_)
val wrbypass_hit_idx = PriorityEncoder(wrbypass_hits)
for (w <- 0 until bankWidth) {
update_wdata(w).ctr := Mux(io.update_alloc(w),
Mux(io.update_taken(w), 4.U,
3.U
),
Mux(wrbypass_hit, inc_ctr(wrbypass(wrbypass_hit_idx)(w), io.update_taken(w)),
inc_ctr(io.update_old_ctr(w), io.update_taken(w))
)
)
update_wdata(w).valid := true.B
update_wdata(w).tag := update_tag
}
when (io.update_mask.reduce(_||_)) {
when (wrbypass_hits.reduce(_||_)) {
wrbypass(wrbypass_hit_idx) := VecInit(update_wdata.map(_.ctr))
} .otherwise {
wrbypass (wrbypass_enq_idx) := VecInit(update_wdata.map(_.ctr))
wrbypass_tags(wrbypass_enq_idx) := update_tag
wrbypass_idxs(wrbypass_enq_idx) := update_idx
wrbypass_enq_idx := WrapInc(wrbypass_enq_idx, nWrBypassEntries)
}
}
}
case class BoomTageParams(
// nSets, histLen, tagSz
tableInfo: Seq[Tuple3[Int, Int, Int]] = Seq(( 128, 2, 7),
( 128, 4, 7),
( 256, 8, 8),
( 256, 16, 8),
( 128, 32, 9),
( 128, 64, 9)),
uBitPeriod: Int = 2048,
singlePorted: Boolean = false
)
class TageBranchPredictorBank(params: BoomTageParams = BoomTageParams())(implicit p: Parameters) extends BranchPredictorBank()(p)
{
val tageUBitPeriod = params.uBitPeriod
val tageNTables = params.tableInfo.size
class TageMeta extends Bundle
{
val provider = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W)))
val alt_differs = Vec(bankWidth, Output(Bool()))
val provider_u = Vec(bankWidth, Output(UInt(2.W)))
val provider_ctr = Vec(bankWidth, Output(UInt(3.W)))
val allocate = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W)))
}
val f3_meta = Wire(new TageMeta)
override val metaSz = f3_meta.asUInt.getWidth
require(metaSz <= bpdMaxMetaLength)
def inc_u(u: UInt, alt_differs: Bool, mispredict: Bool): UInt = {
Mux(!alt_differs, u,
Mux(mispredict, Mux(u === 0.U, 0.U, u - 1.U),
Mux(u === 3.U, 3.U, u + 1.U)))
}
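  // Illustrative note (editorial): u is the 2-bit usefulness counter. It only moves
  // when the provider disagreed with the alternate prediction; a mispredict decays
  // it toward 0 and a correct prediction grows it toward 3 (both saturating).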
val tt = params.tableInfo map {
case (n, l, s) => {
val t = Module(new TageTable(n, s, l, params.uBitPeriod, params.singlePorted))
t.io.f1_req_valid := RegNext(io.f0_valid)
t.io.f1_req_pc := RegNext(bankAlign(io.f0_pc))
t.io.f1_req_ghist := io.f1_ghist
(t, t.mems)
}
}
val tables = tt.map(_._1)
val mems = tt.map(_._2).flatten
val f2_resps = VecInit(tables.map(_.io.f2_resp))
val f3_resps = RegNext(f2_resps)
val s1_update_meta = s1_update.bits.meta.asTypeOf(new TageMeta)
val s1_update_mispredict_mask = UIntToOH(s1_update.bits.cfi_idx.bits) &
Fill(bankWidth, s1_update.bits.cfi_mispredicted)
val s1_update_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, Bool()))))
val s1_update_u_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, UInt(1.W)))))
val s1_update_taken = Wire(Vec(tageNTables, Vec(bankWidth, Bool())))
val s1_update_old_ctr = Wire(Vec(tageNTables, Vec(bankWidth, UInt(3.W))))
val s1_update_alloc = Wire(Vec(tageNTables, Vec(bankWidth, Bool())))
val s1_update_u = Wire(Vec(tageNTables, Vec(bankWidth, UInt(2.W))))
s1_update_taken := DontCare
s1_update_old_ctr := DontCare
s1_update_alloc := DontCare
s1_update_u := DontCare
for (w <- 0 until bankWidth) {
var s2_provided = false.B
var s2_provider = 0.U
var s2_alt_provided = false.B
var s2_alt_provider = 0.U
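    // Scan from the shortest-history table to the longest: the last table that hits
    // becomes the provider, and the previous hit becomes the alternate provider.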
for (i <- 0 until tageNTables) {
val hit = f2_resps(i)(w).valid
s2_alt_provided = s2_alt_provided || (s2_provided && hit)
s2_provided = s2_provided || hit
s2_alt_provider = Mux(hit, s2_provider, s2_alt_provider)
s2_provider = Mux(hit, i.U, s2_provider)
}
val s3_provided = RegNext(s2_provided)
val s3_provider = RegNext(s2_provider)
val s3_alt_provided = RegNext(s2_alt_provided)
val s3_alt_provider = RegNext(s2_alt_provider)
val prov = RegNext(f2_resps(s2_provider)(w).bits)
val alt = RegNext(f2_resps(s2_alt_provider)(w).bits)
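    // Use the provider's prediction unless its counter is weakly biased (3 or 4);
    // in that case prefer the alternate provider, or else the incoming base prediction.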
io.resp.f3(w).taken := Mux(s3_provided,
Mux(prov.ctr === 3.U || prov.ctr === 4.U,
Mux(s3_alt_provided, alt.ctr(2), io.resp_in(0).f3(w).taken),
prov.ctr(2)),
io.resp_in(0).f3(w).taken
)
f3_meta.provider(w).valid := s3_provided
f3_meta.provider(w).bits := s3_provider
f3_meta.alt_differs(w) := s3_alt_provided && alt.ctr(2) =/= io.resp.f3(w).taken
f3_meta.provider_u(w) := prov.u
f3_meta.provider_ctr(w) := prov.ctr
    // Create a mask of tables which did not hit our query, contain useless (u === 0)
    // entries, and use a longer history than the provider
val allocatable_slots = (
VecInit(f3_resps.map(r => !r(w).valid && r(w).bits.u === 0.U)).asUInt &
~(MaskLower(UIntToOH(f3_meta.provider(w).bits)) & Fill(tageNTables, f3_meta.provider(w).valid))
)
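    // Randomize the allocation choice with an LFSR so aliasing branches do not always
    // claim the same table; if the masked pick is not allocatable, take the first slot.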
val alloc_lfsr = random.LFSR(tageNTables max 2)
val first_entry = PriorityEncoder(allocatable_slots)
val masked_entry = PriorityEncoder(allocatable_slots & alloc_lfsr)
val alloc_entry = Mux(allocatable_slots(masked_entry),
masked_entry,
first_entry)
f3_meta.allocate(w).valid := allocatable_slots =/= 0.U
f3_meta.allocate(w).bits := alloc_entry
val update_was_taken = (s1_update.bits.cfi_idx.valid &&
(s1_update.bits.cfi_idx.bits === w.U) &&
s1_update.bits.cfi_taken)
when (s1_update.bits.br_mask(w) && s1_update.valid && s1_update.bits.is_commit_update) {
when (s1_update_meta.provider(w).valid) {
val provider = s1_update_meta.provider(w).bits
s1_update_mask(provider)(w) := true.B
s1_update_u_mask(provider)(w) := true.B
val new_u = inc_u(s1_update_meta.provider_u(w),
s1_update_meta.alt_differs(w),
s1_update_mispredict_mask(w))
s1_update_u (provider)(w) := new_u
s1_update_taken (provider)(w) := update_was_taken
s1_update_old_ctr(provider)(w) := s1_update_meta.provider_ctr(w)
s1_update_alloc (provider)(w) := false.B
}
}
}
when (s1_update.valid && s1_update.bits.is_commit_update && s1_update.bits.cfi_mispredicted && s1_update.bits.cfi_idx.valid) {
val idx = s1_update.bits.cfi_idx.bits
val allocate = s1_update_meta.allocate(idx)
when (allocate.valid) {
s1_update_mask (allocate.bits)(idx) := true.B
s1_update_taken(allocate.bits)(idx) := s1_update.bits.cfi_taken
s1_update_alloc(allocate.bits)(idx) := true.B
s1_update_u_mask(allocate.bits)(idx) := true.B
s1_update_u (allocate.bits)(idx) := 0.U
} .otherwise {
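      // Allocation failed: clear the u bits of the tables above the provider so that a
      // future allocation attempt can succeed.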
val provider = s1_update_meta.provider(idx)
val decr_mask = Mux(provider.valid, ~MaskLower(UIntToOH(provider.bits)), 0.U)
for (i <- 0 until tageNTables) {
when (decr_mask(i)) {
s1_update_u_mask(i)(idx) := true.B
s1_update_u (i)(idx) := 0.U
}
}
}
}
for (i <- 0 until tageNTables) {
for (w <- 0 until bankWidth) {
tables(i).io.update_mask(w) := RegNext(s1_update_mask(i)(w))
tables(i).io.update_taken(w) := RegNext(s1_update_taken(i)(w))
tables(i).io.update_alloc(w) := RegNext(s1_update_alloc(i)(w))
tables(i).io.update_old_ctr(w) := RegNext(s1_update_old_ctr(i)(w))
tables(i).io.update_u_mask(w) := RegNext(s1_update_u_mask(i)(w))
tables(i).io.update_u(w) := RegNext(s1_update_u(i)(w))
}
tables(i).io.update_pc := RegNext(s1_update.bits.pc)
tables(i).io.update_hist := RegNext(s1_update.bits.ghist)
}
//io.f3_meta := Cat(f3_meta.asUInt, micro.io.f3_meta(micro.metaSz-1,0), base.io.f3_meta(base.metaSz-1, 0))
io.f3_meta := f3_meta.asUInt
}
| module TageTable_3( // @[tage.scala:24:7]
input clock, // @[tage.scala:24:7]
input reset, // @[tage.scala:24:7]
input io_f1_req_valid, // @[tage.scala:31:14]
input [39:0] io_f1_req_pc, // @[tage.scala:31:14]
input [63:0] io_f1_req_ghist, // @[tage.scala:31:14]
output io_f2_resp_0_valid, // @[tage.scala:31:14]
output [2:0] io_f2_resp_0_bits_ctr, // @[tage.scala:31:14]
output [1:0] io_f2_resp_0_bits_u, // @[tage.scala:31:14]
output io_f2_resp_1_valid, // @[tage.scala:31:14]
output [2:0] io_f2_resp_1_bits_ctr, // @[tage.scala:31:14]
output [1:0] io_f2_resp_1_bits_u, // @[tage.scala:31:14]
output io_f2_resp_2_valid, // @[tage.scala:31:14]
output [2:0] io_f2_resp_2_bits_ctr, // @[tage.scala:31:14]
output [1:0] io_f2_resp_2_bits_u, // @[tage.scala:31:14]
output io_f2_resp_3_valid, // @[tage.scala:31:14]
output [2:0] io_f2_resp_3_bits_ctr, // @[tage.scala:31:14]
output [1:0] io_f2_resp_3_bits_u, // @[tage.scala:31:14]
input io_update_mask_0, // @[tage.scala:31:14]
input io_update_mask_1, // @[tage.scala:31:14]
input io_update_mask_2, // @[tage.scala:31:14]
input io_update_mask_3, // @[tage.scala:31:14]
input io_update_taken_0, // @[tage.scala:31:14]
input io_update_taken_1, // @[tage.scala:31:14]
input io_update_taken_2, // @[tage.scala:31:14]
input io_update_taken_3, // @[tage.scala:31:14]
input io_update_alloc_0, // @[tage.scala:31:14]
input io_update_alloc_1, // @[tage.scala:31:14]
input io_update_alloc_2, // @[tage.scala:31:14]
input io_update_alloc_3, // @[tage.scala:31:14]
input [2:0] io_update_old_ctr_0, // @[tage.scala:31:14]
input [2:0] io_update_old_ctr_1, // @[tage.scala:31:14]
input [2:0] io_update_old_ctr_2, // @[tage.scala:31:14]
input [2:0] io_update_old_ctr_3, // @[tage.scala:31:14]
input [39:0] io_update_pc, // @[tage.scala:31:14]
input [63:0] io_update_hist, // @[tage.scala:31:14]
input io_update_u_mask_0, // @[tage.scala:31:14]
input io_update_u_mask_1, // @[tage.scala:31:14]
input io_update_u_mask_2, // @[tage.scala:31:14]
input io_update_u_mask_3, // @[tage.scala:31:14]
input [1:0] io_update_u_0, // @[tage.scala:31:14]
input [1:0] io_update_u_1, // @[tage.scala:31:14]
input [1:0] io_update_u_2, // @[tage.scala:31:14]
input [1:0] io_update_u_3 // @[tage.scala:31:14]
);
wire [47:0] _tage_table_16_R0_data; // @[tage.scala:90:27]
wire [7:0] _tage_u_16_R0_data; // @[tage.scala:89:27]
wire io_f1_req_valid_0 = io_f1_req_valid; // @[tage.scala:24:7]
wire [39:0] io_f1_req_pc_0 = io_f1_req_pc; // @[tage.scala:24:7]
wire [63:0] io_f1_req_ghist_0 = io_f1_req_ghist; // @[tage.scala:24:7]
wire io_update_mask_0_0 = io_update_mask_0; // @[tage.scala:24:7]
wire io_update_mask_1_0 = io_update_mask_1; // @[tage.scala:24:7]
wire io_update_mask_2_0 = io_update_mask_2; // @[tage.scala:24:7]
wire io_update_mask_3_0 = io_update_mask_3; // @[tage.scala:24:7]
wire io_update_taken_0_0 = io_update_taken_0; // @[tage.scala:24:7]
wire io_update_taken_1_0 = io_update_taken_1; // @[tage.scala:24:7]
wire io_update_taken_2_0 = io_update_taken_2; // @[tage.scala:24:7]
wire io_update_taken_3_0 = io_update_taken_3; // @[tage.scala:24:7]
wire io_update_alloc_0_0 = io_update_alloc_0; // @[tage.scala:24:7]
wire io_update_alloc_1_0 = io_update_alloc_1; // @[tage.scala:24:7]
wire io_update_alloc_2_0 = io_update_alloc_2; // @[tage.scala:24:7]
wire io_update_alloc_3_0 = io_update_alloc_3; // @[tage.scala:24:7]
wire [2:0] io_update_old_ctr_0_0 = io_update_old_ctr_0; // @[tage.scala:24:7]
wire [2:0] io_update_old_ctr_1_0 = io_update_old_ctr_1; // @[tage.scala:24:7]
wire [2:0] io_update_old_ctr_2_0 = io_update_old_ctr_2; // @[tage.scala:24:7]
wire [2:0] io_update_old_ctr_3_0 = io_update_old_ctr_3; // @[tage.scala:24:7]
wire [39:0] io_update_pc_0 = io_update_pc; // @[tage.scala:24:7]
wire [63:0] io_update_hist_0 = io_update_hist; // @[tage.scala:24:7]
wire io_update_u_mask_0_0 = io_update_u_mask_0; // @[tage.scala:24:7]
wire io_update_u_mask_1_0 = io_update_u_mask_1; // @[tage.scala:24:7]
wire io_update_u_mask_2_0 = io_update_u_mask_2; // @[tage.scala:24:7]
wire io_update_u_mask_3_0 = io_update_u_mask_3; // @[tage.scala:24:7]
wire [1:0] io_update_u_0_0 = io_update_u_0; // @[tage.scala:24:7]
wire [1:0] io_update_u_1_0 = io_update_u_1; // @[tage.scala:24:7]
wire [1:0] io_update_u_2_0 = io_update_u_2; // @[tage.scala:24:7]
wire [1:0] io_update_u_3_0 = io_update_u_3; // @[tage.scala:24:7]
wire update_wdata_0_valid = 1'h1; // @[tage.scala:120:26]
wire update_wdata_1_valid = 1'h1; // @[tage.scala:120:26]
wire update_wdata_2_valid = 1'h1; // @[tage.scala:120:26]
wire update_wdata_3_valid = 1'h1; // @[tage.scala:120:26]
wire [11:0] _wdata_WIRE_0 = 12'h0; // @[tage.scala:130:41]
wire [11:0] _wdata_WIRE_1 = 12'h0; // @[tage.scala:130:41]
wire [11:0] _wdata_WIRE_2 = 12'h0; // @[tage.scala:130:41]
wire [11:0] _wdata_WIRE_3 = 12'h0; // @[tage.scala:130:41]
wire [3:0] _wmask_T = 4'hF; // @[tage.scala:131:34]
wire _wdata_WIRE_2_0 = 1'h0; // @[tage.scala:145:58]
wire _wdata_WIRE_2_1 = 1'h0; // @[tage.scala:145:58]
wire _wdata_WIRE_2_2 = 1'h0; // @[tage.scala:145:58]
wire _wdata_WIRE_2_3 = 1'h0; // @[tage.scala:145:58]
wire _wdata_WIRE_2_4 = 1'h0; // @[tage.scala:145:58]
wire _wdata_WIRE_2_5 = 1'h0; // @[tage.scala:145:58]
wire _wdata_WIRE_2_6 = 1'h0; // @[tage.scala:145:58]
wire _wdata_WIRE_2_7 = 1'h0; // @[tage.scala:145:58]
wire [7:0] _wmask_T_2 = 8'hFF; // @[tage.scala:146:34]
wire s2_req_rhits_0; // @[tage.scala:100:29]
wire [2:0] s2_req_rtage_0_ctr; // @[tage.scala:98:26]
wire [1:0] _io_f2_resp_0_bits_u_T; // @[tage.scala:105:34]
wire s2_req_rhits_1; // @[tage.scala:100:29]
wire [2:0] s2_req_rtage_1_ctr; // @[tage.scala:98:26]
wire [1:0] _io_f2_resp_1_bits_u_T; // @[tage.scala:105:34]
wire s2_req_rhits_2; // @[tage.scala:100:29]
wire [2:0] s2_req_rtage_2_ctr; // @[tage.scala:98:26]
wire [1:0] _io_f2_resp_2_bits_u_T; // @[tage.scala:105:34]
wire s2_req_rhits_3; // @[tage.scala:100:29]
wire [2:0] s2_req_rtage_3_ctr; // @[tage.scala:98:26]
wire [1:0] _io_f2_resp_3_bits_u_T; // @[tage.scala:105:34]
wire update_u_mask_0 = io_update_u_mask_0_0; // @[tage.scala:24:7, :135:30]
wire update_u_mask_1 = io_update_u_mask_0_0; // @[tage.scala:24:7, :135:30]
wire update_u_mask_2 = io_update_u_mask_1_0; // @[tage.scala:24:7, :135:30]
wire update_u_mask_3 = io_update_u_mask_1_0; // @[tage.scala:24:7, :135:30]
wire update_u_mask_4 = io_update_u_mask_2_0; // @[tage.scala:24:7, :135:30]
wire update_u_mask_5 = io_update_u_mask_2_0; // @[tage.scala:24:7, :135:30]
wire update_u_mask_6 = io_update_u_mask_3_0; // @[tage.scala:24:7, :135:30]
wire update_u_mask_7 = io_update_u_mask_3_0; // @[tage.scala:24:7, :135:30]
wire [2:0] io_f2_resp_0_bits_ctr_0; // @[tage.scala:24:7]
wire [1:0] io_f2_resp_0_bits_u_0; // @[tage.scala:24:7]
wire io_f2_resp_0_valid_0; // @[tage.scala:24:7]
wire [2:0] io_f2_resp_1_bits_ctr_0; // @[tage.scala:24:7]
wire [1:0] io_f2_resp_1_bits_u_0; // @[tage.scala:24:7]
wire io_f2_resp_1_valid_0; // @[tage.scala:24:7]
wire [2:0] io_f2_resp_2_bits_ctr_0; // @[tage.scala:24:7]
wire [1:0] io_f2_resp_2_bits_u_0; // @[tage.scala:24:7]
wire io_f2_resp_2_valid_0; // @[tage.scala:24:7]
wire [2:0] io_f2_resp_3_bits_ctr_0; // @[tage.scala:24:7]
wire [1:0] io_f2_resp_3_bits_u_0; // @[tage.scala:24:7]
wire io_f2_resp_3_valid_0; // @[tage.scala:24:7]
reg doing_reset; // @[tage.scala:72:28]
reg [7:0] reset_idx; // @[tage.scala:73:26]
wire [8:0] _GEN = {1'h0, reset_idx}; // @[tage.scala:73:26, :74:26]
wire [8:0] _reset_idx_T = _GEN + {8'h0, doing_reset}; // @[tage.scala:72:28, :74:26]
wire [7:0] _reset_idx_T_1 = _reset_idx_T[7:0]; // @[tage.scala:74:26]
wire [7:0] idx_history_hist_chunks_0 = io_f1_req_ghist_0[7:0]; // @[tage.scala:24:7, :53:11]
wire [7:0] tag_history_hist_chunks_0 = io_f1_req_ghist_0[7:0]; // @[tage.scala:24:7, :53:11]
wire [7:0] idx_history_hist_chunks_1 = io_f1_req_ghist_0[15:8]; // @[tage.scala:24:7, :53:11]
wire [7:0] tag_history_hist_chunks_1 = io_f1_req_ghist_0[15:8]; // @[tage.scala:24:7, :53:11]
wire [7:0] idx_history = idx_history_hist_chunks_0 ^ idx_history_hist_chunks_1; // @[tage.scala:53:11, :55:25]
wire [28:0] _tag_T = io_f1_req_pc_0[39:11]; // @[frontend.scala:149:35]
wire [36:0] _idx_T = {_tag_T, io_f1_req_pc_0[10:3] ^ idx_history}; // @[frontend.scala:149:35]
wire [7:0] s1_hashed_idx = _idx_T[7:0]; // @[tage.scala:60:{29,43}]
wire [7:0] _rdata_WIRE = s1_hashed_idx; // @[tage.scala:60:43, :122:99]
wire [7:0] _u_rdata_WIRE = s1_hashed_idx; // @[tage.scala:60:43, :140:12]
wire [7:0] tag_history = tag_history_hist_chunks_0 ^ tag_history_hist_chunks_1; // @[tage.scala:53:11, :55:25]
wire [28:0] _tag_T_1 = {_tag_T[28:8], _tag_T[7:0] ^ tag_history}; // @[tage.scala:55:25, :62:{30,50}]
wire [7:0] s1_tag = _tag_T_1[7:0]; // @[tage.scala:62:{50,64}]
wire wdata_1_0; // @[tage.scala:145:20]
wire wdata_1_1; // @[tage.scala:145:20]
wire wdata_1_2; // @[tage.scala:145:20]
wire wdata_1_3; // @[tage.scala:145:20]
wire wdata_1_4; // @[tage.scala:145:20]
wire wdata_1_5; // @[tage.scala:145:20]
wire wdata_1_6; // @[tage.scala:145:20]
wire wdata_1_7; // @[tage.scala:145:20]
wire s2_req_rus_0 = _tage_u_16_R0_data[0]; // @[tage.scala:89:27, :99:24]
wire s2_req_rus_1 = _tage_u_16_R0_data[1]; // @[tage.scala:89:27, :99:24]
wire s2_req_rus_2 = _tage_u_16_R0_data[2]; // @[tage.scala:89:27, :99:24]
wire s2_req_rus_3 = _tage_u_16_R0_data[3]; // @[tage.scala:89:27, :99:24]
wire s2_req_rus_4 = _tage_u_16_R0_data[4]; // @[tage.scala:89:27, :99:24]
wire s2_req_rus_5 = _tage_u_16_R0_data[5]; // @[tage.scala:89:27, :99:24]
wire s2_req_rus_6 = _tage_u_16_R0_data[6]; // @[tage.scala:89:27, :99:24]
wire s2_req_rus_7 = _tage_u_16_R0_data[7]; // @[tage.scala:89:27, :99:24]
wire [11:0] wdata_0; // @[tage.scala:130:20]
wire [11:0] wdata_1; // @[tage.scala:130:20]
wire [11:0] wdata_2; // @[tage.scala:130:20]
wire [11:0] wdata_3; // @[tage.scala:130:20]
reg [7:0] s2_tag; // @[tage.scala:96:29]
assign io_f2_resp_0_bits_ctr_0 = s2_req_rtage_0_ctr; // @[tage.scala:24:7, :98:26]
assign io_f2_resp_1_bits_ctr_0 = s2_req_rtage_1_ctr; // @[tage.scala:24:7, :98:26]
assign io_f2_resp_2_bits_ctr_0 = s2_req_rtage_2_ctr; // @[tage.scala:24:7, :98:26]
assign io_f2_resp_3_bits_ctr_0 = s2_req_rtage_3_ctr; // @[tage.scala:24:7, :98:26]
wire s2_req_rtage_0_valid; // @[tage.scala:98:26]
wire [7:0] s2_req_rtage_0_tag; // @[tage.scala:98:26]
wire s2_req_rtage_1_valid; // @[tage.scala:98:26]
wire [7:0] s2_req_rtage_1_tag; // @[tage.scala:98:26]
wire s2_req_rtage_2_valid; // @[tage.scala:98:26]
wire [7:0] s2_req_rtage_2_tag; // @[tage.scala:98:26]
wire s2_req_rtage_3_valid; // @[tage.scala:98:26]
wire [7:0] s2_req_rtage_3_tag; // @[tage.scala:98:26]
wire _s2_req_rhits_T = s2_req_rtage_0_tag == s2_tag; // @[tage.scala:96:29, :98:26, :100:69]
wire _s2_req_rhits_T_1 = s2_req_rtage_0_valid & _s2_req_rhits_T; // @[tage.scala:98:26, :100:{60,69}]
wire _s2_req_rhits_T_2 = ~doing_reset; // @[tage.scala:72:28, :100:83]
wire _s2_req_rhits_T_3 = _s2_req_rhits_T_1 & _s2_req_rhits_T_2; // @[tage.scala:100:{60,80,83}]
assign s2_req_rhits_0 = _s2_req_rhits_T_3; // @[tage.scala:100:{29,80}]
wire _s2_req_rhits_T_4 = s2_req_rtage_1_tag == s2_tag; // @[tage.scala:96:29, :98:26, :100:69]
wire _s2_req_rhits_T_5 = s2_req_rtage_1_valid & _s2_req_rhits_T_4; // @[tage.scala:98:26, :100:{60,69}]
wire _s2_req_rhits_T_6 = ~doing_reset; // @[tage.scala:72:28, :100:83]
wire _s2_req_rhits_T_7 = _s2_req_rhits_T_5 & _s2_req_rhits_T_6; // @[tage.scala:100:{60,80,83}]
assign s2_req_rhits_1 = _s2_req_rhits_T_7; // @[tage.scala:100:{29,80}]
wire _s2_req_rhits_T_8 = s2_req_rtage_2_tag == s2_tag; // @[tage.scala:96:29, :98:26, :100:69]
wire _s2_req_rhits_T_9 = s2_req_rtage_2_valid & _s2_req_rhits_T_8; // @[tage.scala:98:26, :100:{60,69}]
wire _s2_req_rhits_T_10 = ~doing_reset; // @[tage.scala:72:28, :100:83]
wire _s2_req_rhits_T_11 = _s2_req_rhits_T_9 & _s2_req_rhits_T_10; // @[tage.scala:100:{60,80,83}]
assign s2_req_rhits_2 = _s2_req_rhits_T_11; // @[tage.scala:100:{29,80}]
wire _s2_req_rhits_T_12 = s2_req_rtage_3_tag == s2_tag; // @[tage.scala:96:29, :98:26, :100:69]
wire _s2_req_rhits_T_13 = s2_req_rtage_3_valid & _s2_req_rhits_T_12; // @[tage.scala:98:26, :100:{60,69}]
wire _s2_req_rhits_T_14 = ~doing_reset; // @[tage.scala:72:28, :100:83]
wire _s2_req_rhits_T_15 = _s2_req_rhits_T_13 & _s2_req_rhits_T_14; // @[tage.scala:100:{60,80,83}]
assign s2_req_rhits_3 = _s2_req_rhits_T_15; // @[tage.scala:100:{29,80}]
assign io_f2_resp_0_valid_0 = s2_req_rhits_0; // @[tage.scala:24:7, :100:29]
assign io_f2_resp_1_valid_0 = s2_req_rhits_1; // @[tage.scala:24:7, :100:29]
assign io_f2_resp_2_valid_0 = s2_req_rhits_2; // @[tage.scala:24:7, :100:29]
assign io_f2_resp_3_valid_0 = s2_req_rhits_3; // @[tage.scala:24:7, :100:29]
assign _io_f2_resp_0_bits_u_T = {s2_req_rus_1, s2_req_rus_0}; // @[tage.scala:99:24, :105:34]
assign io_f2_resp_0_bits_u_0 = _io_f2_resp_0_bits_u_T; // @[tage.scala:24:7, :105:34]
assign _io_f2_resp_1_bits_u_T = {s2_req_rus_3, s2_req_rus_2}; // @[tage.scala:99:24, :105:34]
assign io_f2_resp_1_bits_u_0 = _io_f2_resp_1_bits_u_T; // @[tage.scala:24:7, :105:34]
assign _io_f2_resp_2_bits_u_T = {s2_req_rus_5, s2_req_rus_4}; // @[tage.scala:99:24, :105:34]
assign io_f2_resp_2_bits_u_0 = _io_f2_resp_2_bits_u_T; // @[tage.scala:24:7, :105:34]
assign _io_f2_resp_3_bits_u_T = {s2_req_rus_7, s2_req_rus_6}; // @[tage.scala:99:24, :105:34]
assign io_f2_resp_3_bits_u_0 = _io_f2_resp_3_bits_u_T; // @[tage.scala:24:7, :105:34]
reg [19:0] clear_u_ctr; // @[tage.scala:109:28]
wire [20:0] _clear_u_ctr_T = {1'h0, clear_u_ctr} + 21'h1; // @[tage.scala:109:28, :110:85]
wire [19:0] _clear_u_ctr_T_1 = _clear_u_ctr_T[19:0]; // @[tage.scala:110:85]
wire [10:0] _doing_clear_u_T = clear_u_ctr[10:0]; // @[tage.scala:109:28, :112:34]
wire doing_clear_u = _doing_clear_u_T == 11'h0; // @[tage.scala:112:{34,61}]
wire _clear_u_hi_T = clear_u_ctr[19]; // @[tage.scala:109:28, :113:31]
wire _clear_u_lo_T = clear_u_ctr[19]; // @[tage.scala:109:28, :113:31, :114:31]
wire clear_u_hi = _clear_u_hi_T; // @[tage.scala:113:{31,72}]
wire _clear_u_mask_WIRE_1 = clear_u_hi; // @[tage.scala:113:72, :116:29]
wire _clear_u_mask_WIRE_3 = clear_u_hi; // @[tage.scala:113:72, :116:29]
wire _clear_u_mask_WIRE_5 = clear_u_hi; // @[tage.scala:113:72, :116:29]
wire _clear_u_mask_WIRE_7 = clear_u_hi; // @[tage.scala:113:72, :116:29]
wire clear_u_lo = ~_clear_u_lo_T; // @[tage.scala:114:{31,72}]
wire _clear_u_mask_WIRE_0 = clear_u_lo; // @[tage.scala:114:72, :116:29]
wire _clear_u_mask_WIRE_2 = clear_u_lo; // @[tage.scala:114:72, :116:29]
wire _clear_u_mask_WIRE_4 = clear_u_lo; // @[tage.scala:114:72, :116:29]
wire _clear_u_mask_WIRE_6 = clear_u_lo; // @[tage.scala:114:72, :116:29]
wire [8:0] clear_u_idx = clear_u_ctr[19:11]; // @[tage.scala:109:28, :115:33]
wire [1:0] clear_u_mask_lo_lo = {_clear_u_mask_WIRE_1, _clear_u_mask_WIRE_0}; // @[tage.scala:116:{29,109}]
wire [1:0] clear_u_mask_lo_hi = {_clear_u_mask_WIRE_3, _clear_u_mask_WIRE_2}; // @[tage.scala:116:{29,109}]
wire [3:0] clear_u_mask_lo = {clear_u_mask_lo_hi, clear_u_mask_lo_lo}; // @[tage.scala:116:109]
wire [1:0] clear_u_mask_hi_lo = {_clear_u_mask_WIRE_5, _clear_u_mask_WIRE_4}; // @[tage.scala:116:{29,109}]
wire [1:0] clear_u_mask_hi_hi = {_clear_u_mask_WIRE_7, _clear_u_mask_WIRE_6}; // @[tage.scala:116:{29,109}]
wire [3:0] clear_u_mask_hi = {clear_u_mask_hi_hi, clear_u_mask_hi_lo}; // @[tage.scala:116:109]
wire [7:0] clear_u_mask = {clear_u_mask_hi, clear_u_mask_lo}; // @[tage.scala:116:109]
wire [7:0] idx_history_hist_chunks_0_1 = io_update_hist_0[7:0]; // @[tage.scala:24:7, :53:11]
wire [7:0] tag_history_hist_chunks_0_1 = io_update_hist_0[7:0]; // @[tage.scala:24:7, :53:11]
wire [7:0] idx_history_hist_chunks_1_1 = io_update_hist_0[15:8]; // @[tage.scala:24:7, :53:11]
wire [7:0] tag_history_hist_chunks_1_1 = io_update_hist_0[15:8]; // @[tage.scala:24:7, :53:11]
wire [7:0] idx_history_1 = idx_history_hist_chunks_0_1 ^ idx_history_hist_chunks_1_1; // @[tage.scala:53:11, :55:25]
wire [28:0] _tag_T_2 = io_update_pc_0[39:11]; // @[frontend.scala:149:35]
wire [36:0] _idx_T_1 = {_tag_T_2, io_update_pc_0[10:3] ^ idx_history_1}; // @[frontend.scala:149:35]
wire [7:0] update_idx = _idx_T_1[7:0]; // @[tage.scala:60:{29,43}]
wire [7:0] tag_history_1 = tag_history_hist_chunks_0_1 ^ tag_history_hist_chunks_1_1; // @[tage.scala:53:11, :55:25]
wire [28:0] _tag_T_3 = {_tag_T_2[28:8], _tag_T_2[7:0] ^ tag_history_1}; // @[tage.scala:55:25, :62:{30,50}]
wire [7:0] update_tag = _tag_T_3[7:0]; // @[tage.scala:62:{50,64}]
wire [7:0] update_wdata_0_tag = update_tag; // @[tage.scala:62:64, :120:26]
wire [7:0] update_wdata_1_tag = update_tag; // @[tage.scala:62:64, :120:26]
wire [7:0] update_wdata_2_tag = update_tag; // @[tage.scala:62:64, :120:26]
wire [7:0] update_wdata_3_tag = update_tag; // @[tage.scala:62:64, :120:26]
wire [2:0] _update_wdata_0_ctr_T_22; // @[tage.scala:168:33]
wire [2:0] _update_wdata_1_ctr_T_22; // @[tage.scala:168:33]
wire [2:0] _update_wdata_2_ctr_T_22; // @[tage.scala:168:33]
wire [2:0] _update_wdata_3_ctr_T_22; // @[tage.scala:168:33]
wire [2:0] update_wdata_0_ctr; // @[tage.scala:120:26]
wire [2:0] update_wdata_1_ctr; // @[tage.scala:120:26]
wire [2:0] update_wdata_2_ctr; // @[tage.scala:120:26]
wire [2:0] update_wdata_3_ctr; // @[tage.scala:120:26]
wire _wen_T = io_update_mask_0_0 | io_update_mask_1_0; // @[tage.scala:24:7, :121:60]
wire _wen_T_1 = _wen_T | io_update_mask_2_0; // @[tage.scala:24:7, :121:60]
wire _wen_T_2 = _wen_T_1 | io_update_mask_3_0; // @[tage.scala:24:7, :121:60]
wire _wen_T_3 = doing_reset | _wen_T_2; // @[tage.scala:72:28, :121:{34,60}]
wire wen = _wen_T_3; // @[tage.scala:121:{21,34}]
reg REG; // @[tage.scala:123:16]
assign s2_req_rtage_0_ctr = _tage_table_16_R0_data[2:0]; // @[tage.scala:90:27, :98:26, :126:49]
assign s2_req_rtage_0_tag = _tage_table_16_R0_data[10:3]; // @[tage.scala:90:27, :98:26, :126:49]
assign s2_req_rtage_0_valid = _tage_table_16_R0_data[11]; // @[tage.scala:90:27, :98:26, :126:49]
assign s2_req_rtage_1_ctr = _tage_table_16_R0_data[14:12]; // @[tage.scala:90:27, :98:26, :126:49]
assign s2_req_rtage_1_tag = _tage_table_16_R0_data[22:15]; // @[tage.scala:90:27, :98:26, :126:49]
assign s2_req_rtage_1_valid = _tage_table_16_R0_data[23]; // @[tage.scala:90:27, :98:26, :126:49]
assign s2_req_rtage_2_ctr = _tage_table_16_R0_data[26:24]; // @[tage.scala:90:27, :98:26, :126:49]
assign s2_req_rtage_2_tag = _tage_table_16_R0_data[34:27]; // @[tage.scala:90:27, :98:26, :126:49]
assign s2_req_rtage_2_valid = _tage_table_16_R0_data[35]; // @[tage.scala:90:27, :98:26, :126:49]
assign s2_req_rtage_3_ctr = _tage_table_16_R0_data[38:36]; // @[tage.scala:90:27, :98:26, :126:49]
assign s2_req_rtage_3_tag = _tage_table_16_R0_data[46:39]; // @[tage.scala:90:27, :98:26, :126:49]
assign s2_req_rtage_3_valid = _tage_table_16_R0_data[47]; // @[tage.scala:90:27, :98:26, :126:49]
wire [7:0] widx = doing_reset ? reset_idx : update_idx; // @[tage.scala:60:43, :72:28, :73:26, :129:19]
wire [8:0] wdata_hi = {1'h1, update_wdata_0_tag}; // @[tage.scala:120:26, :130:114]
wire [11:0] _wdata_T = {wdata_hi, update_wdata_0_ctr}; // @[tage.scala:120:26, :130:114]
wire [11:0] _wdata_WIRE_1_0 = _wdata_T; // @[tage.scala:130:{94,114}]
wire [8:0] wdata_hi_1 = {1'h1, update_wdata_1_tag}; // @[tage.scala:120:26, :130:114]
wire [11:0] _wdata_T_1 = {wdata_hi_1, update_wdata_1_ctr}; // @[tage.scala:120:26, :130:114]
wire [11:0] _wdata_WIRE_1_1 = _wdata_T_1; // @[tage.scala:130:{94,114}]
wire [8:0] wdata_hi_2 = {1'h1, update_wdata_2_tag}; // @[tage.scala:120:26, :130:114]
wire [11:0] _wdata_T_2 = {wdata_hi_2, update_wdata_2_ctr}; // @[tage.scala:120:26, :130:114]
wire [11:0] _wdata_WIRE_1_2 = _wdata_T_2; // @[tage.scala:130:{94,114}]
wire [8:0] wdata_hi_3 = {1'h1, update_wdata_3_tag}; // @[tage.scala:120:26, :130:114]
wire [11:0] _wdata_T_3 = {wdata_hi_3, update_wdata_3_ctr}; // @[tage.scala:120:26, :130:114]
wire [11:0] _wdata_WIRE_1_3 = _wdata_T_3; // @[tage.scala:130:{94,114}]
assign wdata_0 = doing_reset ? 12'h0 : _wdata_WIRE_1_0; // @[tage.scala:72:28, :130:{20,94}]
assign wdata_1 = doing_reset ? 12'h0 : _wdata_WIRE_1_1; // @[tage.scala:72:28, :130:{20,94}]
assign wdata_2 = doing_reset ? 12'h0 : _wdata_WIRE_1_2; // @[tage.scala:72:28, :130:{20,94}]
assign wdata_3 = doing_reset ? 12'h0 : _wdata_WIRE_1_3; // @[tage.scala:72:28, :130:{20,94}]
wire [1:0] wmask_lo = {io_update_mask_1_0, io_update_mask_0_0}; // @[tage.scala:24:7, :131:70]
wire [1:0] wmask_hi = {io_update_mask_3_0, io_update_mask_2_0}; // @[tage.scala:24:7, :131:70]
wire [3:0] _wmask_T_1 = {wmask_hi, wmask_lo}; // @[tage.scala:131:70]
wire [3:0] wmask = doing_reset ? 4'hF : _wmask_T_1; // @[tage.scala:72:28, :131:{20,70}]
wire _GEN_0 = doing_reset | doing_clear_u; // @[tage.scala:72:28, :112:61, :136:43]
wire _update_u_wen_T; // @[tage.scala:136:43]
assign _update_u_wen_T = _GEN_0; // @[tage.scala:136:43]
wire _wdata_T_4; // @[tage.scala:145:33]
assign _wdata_T_4 = _GEN_0; // @[tage.scala:136:43, :145:33]
wire _update_u_wen_T_1 = update_u_mask_0 | update_u_mask_1; // @[tage.scala:135:30, :136:85]
wire _update_u_wen_T_2 = _update_u_wen_T_1 | update_u_mask_2; // @[tage.scala:135:30, :136:85]
wire _update_u_wen_T_3 = _update_u_wen_T_2 | update_u_mask_3; // @[tage.scala:135:30, :136:85]
wire _update_u_wen_T_4 = _update_u_wen_T_3 | update_u_mask_4; // @[tage.scala:135:30, :136:85]
wire _update_u_wen_T_5 = _update_u_wen_T_4 | update_u_mask_5; // @[tage.scala:135:30, :136:85]
wire _update_u_wen_T_6 = _update_u_wen_T_5 | update_u_mask_6; // @[tage.scala:135:30, :136:85]
wire _update_u_wen_T_7 = _update_u_wen_T_6 | update_u_mask_7; // @[tage.scala:135:30, :136:85]
wire _update_u_wen_T_8 = _update_u_wen_T | _update_u_wen_T_7; // @[tage.scala:136:{43,60,85}]
wire update_u_wen = _update_u_wen_T_8; // @[tage.scala:136:{30,60}]
wire [8:0] _widx_T = doing_clear_u ? clear_u_idx : {1'h0, update_idx}; // @[tage.scala:60:43, :112:61, :115:33, :144:47]
wire [8:0] widx_1 = doing_reset ? _GEN : _widx_T; // @[tage.scala:72:28, :74:26, :144:{19,47}]
wire [3:0] wdata_lo = {io_update_u_1_0, io_update_u_0_0}; // @[tage.scala:24:7, :145:110]
wire [3:0] wdata_hi_4 = {io_update_u_3_0, io_update_u_2_0}; // @[tage.scala:24:7, :145:110]
wire [7:0] _wdata_T_5 = {wdata_hi_4, wdata_lo}; // @[tage.scala:145:110]
wire _wdata_T_6 = _wdata_T_5[0]; // @[tage.scala:145:{110,117}]
wire _wdata_WIRE_3_0 = _wdata_T_6; // @[tage.scala:145:{97,117}]
wire _wdata_T_7 = _wdata_T_5[1]; // @[tage.scala:145:{110,117}]
wire _wdata_WIRE_3_1 = _wdata_T_7; // @[tage.scala:145:{97,117}]
wire _wdata_T_8 = _wdata_T_5[2]; // @[tage.scala:145:{110,117}]
wire _wdata_WIRE_3_2 = _wdata_T_8; // @[tage.scala:145:{97,117}]
wire _wdata_T_9 = _wdata_T_5[3]; // @[tage.scala:145:{110,117}]
wire _wdata_WIRE_3_3 = _wdata_T_9; // @[tage.scala:145:{97,117}]
wire _wdata_T_10 = _wdata_T_5[4]; // @[tage.scala:145:{110,117}]
wire _wdata_WIRE_3_4 = _wdata_T_10; // @[tage.scala:145:{97,117}]
wire _wdata_T_11 = _wdata_T_5[5]; // @[tage.scala:145:{110,117}]
wire _wdata_WIRE_3_5 = _wdata_T_11; // @[tage.scala:145:{97,117}]
wire _wdata_T_12 = _wdata_T_5[6]; // @[tage.scala:145:{110,117}]
wire _wdata_WIRE_3_6 = _wdata_T_12; // @[tage.scala:145:{97,117}]
wire _wdata_T_13 = _wdata_T_5[7]; // @[tage.scala:145:{110,117}]
wire _wdata_WIRE_3_7 = _wdata_T_13; // @[tage.scala:145:{97,117}]
assign wdata_1_0 = ~_wdata_T_4 & _wdata_WIRE_3_0; // @[tage.scala:145:{20,33,97}]
assign wdata_1_1 = ~_wdata_T_4 & _wdata_WIRE_3_1; // @[tage.scala:145:{20,33,97}]
assign wdata_1_2 = ~_wdata_T_4 & _wdata_WIRE_3_2; // @[tage.scala:145:{20,33,97}]
assign wdata_1_3 = ~_wdata_T_4 & _wdata_WIRE_3_3; // @[tage.scala:145:{20,33,97}]
assign wdata_1_4 = ~_wdata_T_4 & _wdata_WIRE_3_4; // @[tage.scala:145:{20,33,97}]
assign wdata_1_5 = ~_wdata_T_4 & _wdata_WIRE_3_5; // @[tage.scala:145:{20,33,97}]
assign wdata_1_6 = ~_wdata_T_4 & _wdata_WIRE_3_6; // @[tage.scala:145:{20,33,97}]
assign wdata_1_7 = ~_wdata_T_4 & _wdata_WIRE_3_7; // @[tage.scala:145:{20,33,97}]
wire [1:0] wmask_lo_lo = {update_u_mask_1, update_u_mask_0}; // @[tage.scala:135:30, :146:106]
wire [1:0] wmask_lo_hi = {update_u_mask_3, update_u_mask_2}; // @[tage.scala:135:30, :146:106]
wire [3:0] wmask_lo_1 = {wmask_lo_hi, wmask_lo_lo}; // @[tage.scala:146:106]
wire [1:0] wmask_hi_lo = {update_u_mask_5, update_u_mask_4}; // @[tage.scala:135:30, :146:106]
wire [1:0] wmask_hi_hi = {update_u_mask_7, update_u_mask_6}; // @[tage.scala:135:30, :146:106]
wire [3:0] wmask_hi_1 = {wmask_hi_hi, wmask_hi_lo}; // @[tage.scala:146:106]
wire [7:0] _wmask_T_3 = {wmask_hi_1, wmask_lo_1}; // @[tage.scala:146:106]
wire [7:0] _wmask_T_4 = doing_clear_u ? clear_u_mask : _wmask_T_3; // @[tage.scala:112:61, :116:109, :146:{62,106}]
wire [7:0] wmask_1 = doing_reset ? 8'hFF : _wmask_T_4; // @[tage.scala:72:28, :146:{20,62}]
reg [7:0] wrbypass_tags_0; // @[tage.scala:154:29]
reg [7:0] wrbypass_tags_1; // @[tage.scala:154:29]
reg [7:0] wrbypass_idxs_0; // @[tage.scala:155:29]
reg [7:0] wrbypass_idxs_1; // @[tage.scala:155:29]
reg [2:0] wrbypass_0_0; // @[tage.scala:156:29]
reg [2:0] wrbypass_0_1; // @[tage.scala:156:29]
reg [2:0] wrbypass_0_2; // @[tage.scala:156:29]
reg [2:0] wrbypass_0_3; // @[tage.scala:156:29]
reg [2:0] wrbypass_1_0; // @[tage.scala:156:29]
reg [2:0] wrbypass_1_1; // @[tage.scala:156:29]
reg [2:0] wrbypass_1_2; // @[tage.scala:156:29]
reg [2:0] wrbypass_1_3; // @[tage.scala:156:29]
reg wrbypass_enq_idx; // @[tage.scala:157:33]
wire _wrbypass_hits_T = ~doing_reset; // @[tage.scala:72:28, :100:83, :160:5]
wire _wrbypass_hits_T_1 = wrbypass_tags_0 == update_tag; // @[tage.scala:62:64, :154:29, :161:22]
wire _wrbypass_hits_T_2 = _wrbypass_hits_T & _wrbypass_hits_T_1; // @[tage.scala:160:{5,18}, :161:22]
wire _wrbypass_hits_T_3 = wrbypass_idxs_0 == update_idx; // @[tage.scala:60:43, :155:29, :162:22]
wire _wrbypass_hits_T_4 = _wrbypass_hits_T_2 & _wrbypass_hits_T_3; // @[tage.scala:160:18, :161:37, :162:22]
wire wrbypass_hits_0 = _wrbypass_hits_T_4; // @[tage.scala:159:33, :161:37]
wire _wrbypass_hits_T_5 = ~doing_reset; // @[tage.scala:72:28, :100:83, :160:5]
wire _wrbypass_hits_T_6 = wrbypass_tags_1 == update_tag; // @[tage.scala:62:64, :154:29, :161:22]
wire _wrbypass_hits_T_7 = _wrbypass_hits_T_5 & _wrbypass_hits_T_6; // @[tage.scala:160:{5,18}, :161:22]
wire _wrbypass_hits_T_8 = wrbypass_idxs_1 == update_idx; // @[tage.scala:60:43, :155:29, :162:22]
wire _wrbypass_hits_T_9 = _wrbypass_hits_T_7 & _wrbypass_hits_T_8; // @[tage.scala:160:18, :161:37, :162:22]
wire wrbypass_hits_1 = _wrbypass_hits_T_9; // @[tage.scala:159:33, :161:37]
wire wrbypass_hit = wrbypass_hits_0 | wrbypass_hits_1; // @[tage.scala:159:33, :164:48]
wire wrbypass_hit_idx = ~wrbypass_hits_0; // @[Mux.scala:50:70]
wire [2:0] _update_wdata_0_ctr_T = io_update_taken_0_0 ? 3'h4 : 3'h3; // @[tage.scala:24:7, :169:10]
wire _update_wdata_0_ctr_T_1 = ~io_update_taken_0_0; // @[tage.scala:24:7, :67:9]
wire [2:0] _GEN_1 = wrbypass_hit_idx ? wrbypass_1_0 : wrbypass_0_0; // @[Mux.scala:50:70]
wire [2:0] _GEN_2 = wrbypass_hit_idx ? wrbypass_1_1 : wrbypass_0_1; // @[Mux.scala:50:70]
wire [2:0] _GEN_3 = wrbypass_hit_idx ? wrbypass_1_2 : wrbypass_0_2; // @[Mux.scala:50:70]
wire [2:0] _GEN_4 = wrbypass_hit_idx ? wrbypass_1_3 : wrbypass_0_3; // @[Mux.scala:50:70]
wire _update_wdata_0_ctr_T_2 = _GEN_1 == 3'h0; // @[tage.scala:67:25]
wire [3:0] _GEN_5 = {1'h0, _GEN_1}; // @[tage.scala:67:{25,43}]
wire [3:0] _update_wdata_0_ctr_T_3 = _GEN_5 - 4'h1; // @[tage.scala:67:43]
wire [2:0] _update_wdata_0_ctr_T_4 = _update_wdata_0_ctr_T_3[2:0]; // @[tage.scala:67:43]
wire [2:0] _update_wdata_0_ctr_T_5 = _update_wdata_0_ctr_T_2 ? 3'h0 : _update_wdata_0_ctr_T_4; // @[tage.scala:67:{20,25,43}]
wire _update_wdata_0_ctr_T_6 = &_GEN_1; // @[tage.scala:67:25, :68:25]
wire [3:0] _update_wdata_0_ctr_T_7 = _GEN_5 + 4'h1; // @[tage.scala:67:43, :68:43]
wire [2:0] _update_wdata_0_ctr_T_8 = _update_wdata_0_ctr_T_7[2:0]; // @[tage.scala:68:43]
wire [2:0] _update_wdata_0_ctr_T_9 = _update_wdata_0_ctr_T_6 ? 3'h7 : _update_wdata_0_ctr_T_8; // @[tage.scala:68:{20,25,43}]
wire [2:0] _update_wdata_0_ctr_T_10 = _update_wdata_0_ctr_T_1 ? _update_wdata_0_ctr_T_5 : _update_wdata_0_ctr_T_9; // @[tage.scala:67:{8,9,20}, :68:20]
wire _update_wdata_0_ctr_T_11 = ~io_update_taken_0_0; // @[tage.scala:24:7, :67:9]
wire _update_wdata_0_ctr_T_12 = io_update_old_ctr_0_0 == 3'h0; // @[tage.scala:24:7, :67:25]
wire [3:0] _GEN_6 = {1'h0, io_update_old_ctr_0_0}; // @[tage.scala:24:7, :67:43]
wire [3:0] _update_wdata_0_ctr_T_13 = _GEN_6 - 4'h1; // @[tage.scala:67:43]
wire [2:0] _update_wdata_0_ctr_T_14 = _update_wdata_0_ctr_T_13[2:0]; // @[tage.scala:67:43]
wire [2:0] _update_wdata_0_ctr_T_15 = _update_wdata_0_ctr_T_12 ? 3'h0 : _update_wdata_0_ctr_T_14; // @[tage.scala:67:{20,25,43}]
wire _update_wdata_0_ctr_T_16 = &io_update_old_ctr_0_0; // @[tage.scala:24:7, :68:25]
wire [3:0] _update_wdata_0_ctr_T_17 = _GEN_6 + 4'h1; // @[tage.scala:67:43, :68:43]
wire [2:0] _update_wdata_0_ctr_T_18 = _update_wdata_0_ctr_T_17[2:0]; // @[tage.scala:68:43]
wire [2:0] _update_wdata_0_ctr_T_19 = _update_wdata_0_ctr_T_16 ? 3'h7 : _update_wdata_0_ctr_T_18; // @[tage.scala:68:{20,25,43}]
wire [2:0] _update_wdata_0_ctr_T_20 = _update_wdata_0_ctr_T_11 ? _update_wdata_0_ctr_T_15 : _update_wdata_0_ctr_T_19; // @[tage.scala:67:{8,9,20}, :68:20]
wire [2:0] _update_wdata_0_ctr_T_21 = wrbypass_hit ? _update_wdata_0_ctr_T_10 : _update_wdata_0_ctr_T_20; // @[tage.scala:67:8, :164:48, :172:10]
assign _update_wdata_0_ctr_T_22 = io_update_alloc_0_0 ? _update_wdata_0_ctr_T : _update_wdata_0_ctr_T_21; // @[tage.scala:24:7, :168:33, :169:10, :172:10]
assign update_wdata_0_ctr = _update_wdata_0_ctr_T_22; // @[tage.scala:120:26, :168:33]
wire [2:0] _update_wdata_1_ctr_T = io_update_taken_1_0 ? 3'h4 : 3'h3; // @[tage.scala:24:7, :169:10]
wire _update_wdata_1_ctr_T_1 = ~io_update_taken_1_0; // @[tage.scala:24:7, :67:9]
wire _update_wdata_1_ctr_T_2 = _GEN_2 == 3'h0; // @[tage.scala:67:25]
wire [3:0] _GEN_7 = {1'h0, _GEN_2}; // @[tage.scala:67:{25,43}]
wire [3:0] _update_wdata_1_ctr_T_3 = _GEN_7 - 4'h1; // @[tage.scala:67:43]
wire [2:0] _update_wdata_1_ctr_T_4 = _update_wdata_1_ctr_T_3[2:0]; // @[tage.scala:67:43]
wire [2:0] _update_wdata_1_ctr_T_5 = _update_wdata_1_ctr_T_2 ? 3'h0 : _update_wdata_1_ctr_T_4; // @[tage.scala:67:{20,25,43}]
wire _update_wdata_1_ctr_T_6 = &_GEN_2; // @[tage.scala:67:25, :68:25]
wire [3:0] _update_wdata_1_ctr_T_7 = _GEN_7 + 4'h1; // @[tage.scala:67:43, :68:43]
wire [2:0] _update_wdata_1_ctr_T_8 = _update_wdata_1_ctr_T_7[2:0]; // @[tage.scala:68:43]
wire [2:0] _update_wdata_1_ctr_T_9 = _update_wdata_1_ctr_T_6 ? 3'h7 : _update_wdata_1_ctr_T_8; // @[tage.scala:68:{20,25,43}]
wire [2:0] _update_wdata_1_ctr_T_10 = _update_wdata_1_ctr_T_1 ? _update_wdata_1_ctr_T_5 : _update_wdata_1_ctr_T_9; // @[tage.scala:67:{8,9,20}, :68:20]
wire _update_wdata_1_ctr_T_11 = ~io_update_taken_1_0; // @[tage.scala:24:7, :67:9]
wire _update_wdata_1_ctr_T_12 = io_update_old_ctr_1_0 == 3'h0; // @[tage.scala:24:7, :67:25]
wire [3:0] _GEN_8 = {1'h0, io_update_old_ctr_1_0}; // @[tage.scala:24:7, :67:43]
wire [3:0] _update_wdata_1_ctr_T_13 = _GEN_8 - 4'h1; // @[tage.scala:67:43]
wire [2:0] _update_wdata_1_ctr_T_14 = _update_wdata_1_ctr_T_13[2:0]; // @[tage.scala:67:43]
wire [2:0] _update_wdata_1_ctr_T_15 = _update_wdata_1_ctr_T_12 ? 3'h0 : _update_wdata_1_ctr_T_14; // @[tage.scala:67:{20,25,43}]
wire _update_wdata_1_ctr_T_16 = &io_update_old_ctr_1_0; // @[tage.scala:24:7, :68:25]
wire [3:0] _update_wdata_1_ctr_T_17 = _GEN_8 + 4'h1; // @[tage.scala:67:43, :68:43]
wire [2:0] _update_wdata_1_ctr_T_18 = _update_wdata_1_ctr_T_17[2:0]; // @[tage.scala:68:43]
wire [2:0] _update_wdata_1_ctr_T_19 = _update_wdata_1_ctr_T_16 ? 3'h7 : _update_wdata_1_ctr_T_18; // @[tage.scala:68:{20,25,43}]
wire [2:0] _update_wdata_1_ctr_T_20 = _update_wdata_1_ctr_T_11 ? _update_wdata_1_ctr_T_15 : _update_wdata_1_ctr_T_19; // @[tage.scala:67:{8,9,20}, :68:20]
wire [2:0] _update_wdata_1_ctr_T_21 = wrbypass_hit ? _update_wdata_1_ctr_T_10 : _update_wdata_1_ctr_T_20; // @[tage.scala:67:8, :164:48, :172:10]
assign _update_wdata_1_ctr_T_22 = io_update_alloc_1_0 ? _update_wdata_1_ctr_T : _update_wdata_1_ctr_T_21; // @[tage.scala:24:7, :168:33, :169:10, :172:10]
assign update_wdata_1_ctr = _update_wdata_1_ctr_T_22; // @[tage.scala:120:26, :168:33]
wire [2:0] _update_wdata_2_ctr_T = io_update_taken_2_0 ? 3'h4 : 3'h3; // @[tage.scala:24:7, :169:10]
wire _update_wdata_2_ctr_T_1 = ~io_update_taken_2_0; // @[tage.scala:24:7, :67:9]
wire _update_wdata_2_ctr_T_2 = _GEN_3 == 3'h0; // @[tage.scala:67:25]
wire [3:0] _GEN_9 = {1'h0, _GEN_3}; // @[tage.scala:67:{25,43}]
wire [3:0] _update_wdata_2_ctr_T_3 = _GEN_9 - 4'h1; // @[tage.scala:67:43]
wire [2:0] _update_wdata_2_ctr_T_4 = _update_wdata_2_ctr_T_3[2:0]; // @[tage.scala:67:43]
wire [2:0] _update_wdata_2_ctr_T_5 = _update_wdata_2_ctr_T_2 ? 3'h0 : _update_wdata_2_ctr_T_4; // @[tage.scala:67:{20,25,43}]
wire _update_wdata_2_ctr_T_6 = &_GEN_3; // @[tage.scala:67:25, :68:25]
wire [3:0] _update_wdata_2_ctr_T_7 = _GEN_9 + 4'h1; // @[tage.scala:67:43, :68:43]
wire [2:0] _update_wdata_2_ctr_T_8 = _update_wdata_2_ctr_T_7[2:0]; // @[tage.scala:68:43]
wire [2:0] _update_wdata_2_ctr_T_9 = _update_wdata_2_ctr_T_6 ? 3'h7 : _update_wdata_2_ctr_T_8; // @[tage.scala:68:{20,25,43}]
wire [2:0] _update_wdata_2_ctr_T_10 = _update_wdata_2_ctr_T_1 ? _update_wdata_2_ctr_T_5 : _update_wdata_2_ctr_T_9; // @[tage.scala:67:{8,9,20}, :68:20]
wire _update_wdata_2_ctr_T_11 = ~io_update_taken_2_0; // @[tage.scala:24:7, :67:9]
wire _update_wdata_2_ctr_T_12 = io_update_old_ctr_2_0 == 3'h0; // @[tage.scala:24:7, :67:25]
wire [3:0] _GEN_10 = {1'h0, io_update_old_ctr_2_0}; // @[tage.scala:24:7, :67:43]
wire [3:0] _update_wdata_2_ctr_T_13 = _GEN_10 - 4'h1; // @[tage.scala:67:43]
wire [2:0] _update_wdata_2_ctr_T_14 = _update_wdata_2_ctr_T_13[2:0]; // @[tage.scala:67:43]
wire [2:0] _update_wdata_2_ctr_T_15 = _update_wdata_2_ctr_T_12 ? 3'h0 : _update_wdata_2_ctr_T_14; // @[tage.scala:67:{20,25,43}]
wire _update_wdata_2_ctr_T_16 = &io_update_old_ctr_2_0; // @[tage.scala:24:7, :68:25]
wire [3:0] _update_wdata_2_ctr_T_17 = _GEN_10 + 4'h1; // @[tage.scala:67:43, :68:43]
wire [2:0] _update_wdata_2_ctr_T_18 = _update_wdata_2_ctr_T_17[2:0]; // @[tage.scala:68:43]
wire [2:0] _update_wdata_2_ctr_T_19 = _update_wdata_2_ctr_T_16 ? 3'h7 : _update_wdata_2_ctr_T_18; // @[tage.scala:68:{20,25,43}]
wire [2:0] _update_wdata_2_ctr_T_20 = _update_wdata_2_ctr_T_11 ? _update_wdata_2_ctr_T_15 : _update_wdata_2_ctr_T_19; // @[tage.scala:67:{8,9,20}, :68:20]
wire [2:0] _update_wdata_2_ctr_T_21 = wrbypass_hit ? _update_wdata_2_ctr_T_10 : _update_wdata_2_ctr_T_20; // @[tage.scala:67:8, :164:48, :172:10]
assign _update_wdata_2_ctr_T_22 = io_update_alloc_2_0 ? _update_wdata_2_ctr_T : _update_wdata_2_ctr_T_21; // @[tage.scala:24:7, :168:33, :169:10, :172:10]
assign update_wdata_2_ctr = _update_wdata_2_ctr_T_22; // @[tage.scala:120:26, :168:33]
wire [2:0] _update_wdata_3_ctr_T = io_update_taken_3_0 ? 3'h4 : 3'h3; // @[tage.scala:24:7, :169:10]
wire _update_wdata_3_ctr_T_1 = ~io_update_taken_3_0; // @[tage.scala:24:7, :67:9]
wire _update_wdata_3_ctr_T_2 = _GEN_4 == 3'h0; // @[tage.scala:67:25]
wire [3:0] _GEN_11 = {1'h0, _GEN_4}; // @[tage.scala:67:{25,43}]
wire [3:0] _update_wdata_3_ctr_T_3 = _GEN_11 - 4'h1; // @[tage.scala:67:43]
wire [2:0] _update_wdata_3_ctr_T_4 = _update_wdata_3_ctr_T_3[2:0]; // @[tage.scala:67:43]
wire [2:0] _update_wdata_3_ctr_T_5 = _update_wdata_3_ctr_T_2 ? 3'h0 : _update_wdata_3_ctr_T_4; // @[tage.scala:67:{20,25,43}]
wire _update_wdata_3_ctr_T_6 = &_GEN_4; // @[tage.scala:67:25, :68:25]
wire [3:0] _update_wdata_3_ctr_T_7 = _GEN_11 + 4'h1; // @[tage.scala:67:43, :68:43]
wire [2:0] _update_wdata_3_ctr_T_8 = _update_wdata_3_ctr_T_7[2:0]; // @[tage.scala:68:43]
wire [2:0] _update_wdata_3_ctr_T_9 = _update_wdata_3_ctr_T_6 ? 3'h7 : _update_wdata_3_ctr_T_8; // @[tage.scala:68:{20,25,43}]
wire [2:0] _update_wdata_3_ctr_T_10 = _update_wdata_3_ctr_T_1 ? _update_wdata_3_ctr_T_5 : _update_wdata_3_ctr_T_9; // @[tage.scala:67:{8,9,20}, :68:20]
wire _update_wdata_3_ctr_T_11 = ~io_update_taken_3_0; // @[tage.scala:24:7, :67:9]
wire _update_wdata_3_ctr_T_12 = io_update_old_ctr_3_0 == 3'h0; // @[tage.scala:24:7, :67:25]
wire [3:0] _GEN_12 = {1'h0, io_update_old_ctr_3_0}; // @[tage.scala:24:7, :67:43]
wire [3:0] _update_wdata_3_ctr_T_13 = _GEN_12 - 4'h1; // @[tage.scala:67:43]
wire [2:0] _update_wdata_3_ctr_T_14 = _update_wdata_3_ctr_T_13[2:0]; // @[tage.scala:67:43]
wire [2:0] _update_wdata_3_ctr_T_15 = _update_wdata_3_ctr_T_12 ? 3'h0 : _update_wdata_3_ctr_T_14; // @[tage.scala:67:{20,25,43}]
wire _update_wdata_3_ctr_T_16 = &io_update_old_ctr_3_0; // @[tage.scala:24:7, :68:25]
wire [3:0] _update_wdata_3_ctr_T_17 = _GEN_12 + 4'h1; // @[tage.scala:67:43, :68:43]
wire [2:0] _update_wdata_3_ctr_T_18 = _update_wdata_3_ctr_T_17[2:0]; // @[tage.scala:68:43]
wire [2:0] _update_wdata_3_ctr_T_19 = _update_wdata_3_ctr_T_16 ? 3'h7 : _update_wdata_3_ctr_T_18; // @[tage.scala:68:{20,25,43}]
wire [2:0] _update_wdata_3_ctr_T_20 = _update_wdata_3_ctr_T_11 ? _update_wdata_3_ctr_T_15 : _update_wdata_3_ctr_T_19; // @[tage.scala:67:{8,9,20}, :68:20]
wire [2:0] _update_wdata_3_ctr_T_21 = wrbypass_hit ? _update_wdata_3_ctr_T_10 : _update_wdata_3_ctr_T_20; // @[tage.scala:67:8, :164:48, :172:10]
assign _update_wdata_3_ctr_T_22 = io_update_alloc_3_0 ? _update_wdata_3_ctr_T : _update_wdata_3_ctr_T_21; // @[tage.scala:24:7, :168:33, :169:10, :172:10]
assign update_wdata_3_ctr = _update_wdata_3_ctr_T_22; // @[tage.scala:120:26, :168:33]
wire [1:0] _wrbypass_enq_idx_T = {1'h0, wrbypass_enq_idx} + 2'h1; // @[util.scala:211:14]
wire _wrbypass_enq_idx_T_1 = _wrbypass_enq_idx_T[0]; // @[util.scala:211:14]
wire _wrbypass_enq_idx_T_2 = _wrbypass_enq_idx_T_1; // @[util.scala:211:{14,20}]
wire _T_31 = _wen_T | io_update_mask_2_0 | io_update_mask_3_0; // @[tage.scala:24:7, :121:60, :180:32]
wire _GEN_13 = wrbypass_hit ? wrbypass_hit_idx : wrbypass_enq_idx; // @[Mux.scala:50:70]
wire _GEN_14 = ~_T_31 | wrbypass_hit | wrbypass_enq_idx; // @[tage.scala:154:29, :156:29, :157:33, :164:48, :180:{32,38}, :181:39, :185:39]
wire _GEN_15 = ~_T_31 | wrbypass_hit | ~wrbypass_enq_idx; // @[tage.scala:154:29, :156:29, :157:33, :164:48, :180:{32,38}, :181:39, :185:39]
always @(posedge clock) begin // @[tage.scala:24:7]
if (reset) begin // @[tage.scala:24:7]
doing_reset <= 1'h1; // @[tage.scala:72:28]
reset_idx <= 8'h0; // @[tage.scala:73:26]
clear_u_ctr <= 20'h0; // @[tage.scala:109:28]
wrbypass_enq_idx <= 1'h0; // @[tage.scala:157:33]
end
else begin // @[tage.scala:24:7]
doing_reset <= reset_idx != 8'hFF & doing_reset; // @[tage.scala:72:28, :73:26, :75:{19,36,50}]
reset_idx <= _reset_idx_T_1; // @[tage.scala:73:26, :74:26]
clear_u_ctr <= doing_reset ? 20'h1 : _clear_u_ctr_T_1; // @[tage.scala:72:28, :109:28, :110:{22,36,70,85}]
if (~_T_31 | wrbypass_hit) begin // @[tage.scala:156:29, :157:33, :164:48, :180:{32,38}, :181:39]
end
else // @[tage.scala:157:33, :180:38, :181:39]
wrbypass_enq_idx <= _wrbypass_enq_idx_T_2; // @[util.scala:211:20]
end
s2_tag <= s1_tag; // @[tage.scala:62:64, :96:29]
REG <= wen; // @[tage.scala:121:21, :123:16]
if (_GEN_14) begin // @[tage.scala:154:29, :180:38, :181:39, :185:39]
end
else // @[tage.scala:154:29, :180:38, :181:39, :185:39]
wrbypass_tags_0 <= update_tag; // @[tage.scala:62:64, :154:29]
if (_GEN_15) begin // @[tage.scala:154:29, :180:38, :181:39, :185:39]
end
else // @[tage.scala:154:29, :180:38, :181:39, :185:39]
wrbypass_tags_1 <= update_tag; // @[tage.scala:62:64, :154:29]
if (_GEN_14) begin // @[tage.scala:154:29, :155:29, :180:38, :181:39, :185:39, :186:39]
end
else // @[tage.scala:155:29, :180:38, :181:39, :186:39]
wrbypass_idxs_0 <= update_idx; // @[tage.scala:60:43, :155:29]
if (_GEN_15) begin // @[tage.scala:154:29, :155:29, :180:38, :181:39, :185:39, :186:39]
end
else // @[tage.scala:155:29, :180:38, :181:39, :186:39]
wrbypass_idxs_1 <= update_idx; // @[tage.scala:60:43, :155:29]
if (~_T_31 | _GEN_13) begin // @[tage.scala:156:29, :180:{32,38}, :181:39, :182:34, :184:39]
end
else begin // @[tage.scala:156:29, :180:38, :181:39]
wrbypass_0_0 <= update_wdata_0_ctr; // @[tage.scala:120:26, :156:29]
wrbypass_0_1 <= update_wdata_1_ctr; // @[tage.scala:120:26, :156:29]
wrbypass_0_2 <= update_wdata_2_ctr; // @[tage.scala:120:26, :156:29]
wrbypass_0_3 <= update_wdata_3_ctr; // @[tage.scala:120:26, :156:29]
end
if (_T_31 & _GEN_13) begin // @[tage.scala:156:29, :180:{32,38}, :181:39, :182:34, :184:39]
wrbypass_1_0 <= update_wdata_0_ctr; // @[tage.scala:120:26, :156:29]
wrbypass_1_1 <= update_wdata_1_ctr; // @[tage.scala:120:26, :156:29]
wrbypass_1_2 <= update_wdata_2_ctr; // @[tage.scala:120:26, :156:29]
wrbypass_1_3 <= update_wdata_3_ctr; // @[tage.scala:120:26, :156:29]
end
  end // always @(posedge)
tage_u_16 tage_u_16 ( // @[tage.scala:89:27]
.R0_addr (_u_rdata_WIRE), // @[tage.scala:140:12]
.R0_en (io_f1_req_valid_0), // @[tage.scala:24:7]
.R0_clk (clock),
.R0_data (_tage_u_16_R0_data),
.W0_addr (widx_1[7:0]), // @[tage.scala:144:19, :147:13]
.W0_en (update_u_wen), // @[tage.scala:136:30]
.W0_clk (clock),
.W0_data ({wdata_1_7, wdata_1_6, wdata_1_5, wdata_1_4, wdata_1_3, wdata_1_2, wdata_1_1, wdata_1_0}), // @[tage.scala:89:27, :145:20]
.W0_mask (wmask_1) // @[tage.scala:146:20]
); // @[tage.scala:89:27]
tage_table_16 tage_table_16 ( // @[tage.scala:90:27]
.R0_addr (_rdata_WIRE), // @[tage.scala:122:99]
.R0_en (io_f1_req_valid_0), // @[tage.scala:24:7]
.R0_clk (clock),
.R0_data (_tage_table_16_R0_data),
.W0_addr (widx), // @[tage.scala:129:19]
.W0_en (wen), // @[tage.scala:121:21]
.W0_clk (clock),
.W0_data ({wdata_3, wdata_2, wdata_1, wdata_0}), // @[tage.scala:90:27, :130:20]
.W0_mask (wmask) // @[tage.scala:131:20]
); // @[tage.scala:90:27]
assign io_f2_resp_0_valid = io_f2_resp_0_valid_0; // @[tage.scala:24:7]
assign io_f2_resp_0_bits_ctr = io_f2_resp_0_bits_ctr_0; // @[tage.scala:24:7]
assign io_f2_resp_0_bits_u = io_f2_resp_0_bits_u_0; // @[tage.scala:24:7]
assign io_f2_resp_1_valid = io_f2_resp_1_valid_0; // @[tage.scala:24:7]
assign io_f2_resp_1_bits_ctr = io_f2_resp_1_bits_ctr_0; // @[tage.scala:24:7]
assign io_f2_resp_1_bits_u = io_f2_resp_1_bits_u_0; // @[tage.scala:24:7]
assign io_f2_resp_2_valid = io_f2_resp_2_valid_0; // @[tage.scala:24:7]
assign io_f2_resp_2_bits_ctr = io_f2_resp_2_bits_ctr_0; // @[tage.scala:24:7]
assign io_f2_resp_2_bits_u = io_f2_resp_2_bits_u_0; // @[tage.scala:24:7]
assign io_f2_resp_3_valid = io_f2_resp_3_valid_0; // @[tage.scala:24:7]
assign io_f2_resp_3_bits_ctr = io_f2_resp_3_bits_ctr_0; // @[tage.scala:24:7]
assign io_f2_resp_3_bits_u = io_f2_resp_3_bits_u_0; // @[tage.scala:24:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File SinkC.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
class SinkCResponse(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val last = Bool()
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val source = UInt(params.inner.bundle.sourceBits.W)
val param = UInt(3.W)
val data = Bool()
}
class PutBufferCEntry(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val data = UInt(params.inner.bundle.dataBits.W)
val corrupt = Bool()
}
class SinkC(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val req = Decoupled(new FullRequest(params)) // Release
val resp = Valid(new SinkCResponse(params)) // ProbeAck
val c = Flipped(Decoupled(new TLBundleC(params.inner.bundle)))
// Find 'way' via MSHR CAM lookup
val set = UInt(params.setBits.W)
val way = Flipped(UInt(params.wayBits.W))
// ProbeAck write-back
val bs_adr = Decoupled(new BankedStoreInnerAddress(params))
val bs_dat = new BankedStoreInnerPoison(params)
// SourceD sideband
val rel_pop = Flipped(Decoupled(new PutBufferPop(params)))
val rel_beat = new PutBufferCEntry(params)
})
if (params.firstLevel) {
// Tie off unused ports
io.req.valid := false.B
io.req.bits := DontCare
io.resp.valid := false.B
io.resp.bits := DontCare
io.c.ready := true.B
io.set := 0.U
io.bs_adr.valid := false.B
io.bs_adr.bits := DontCare
io.bs_dat := DontCare
io.rel_pop.ready := true.B
io.rel_beat := DontCare
} else {
// No restrictions on the type of buffer
val c = params.micro.innerBuf.c(io.c)
val (tag, set, offset) = params.parseAddress(c.bits.address)
val (first, last, _, beat) = params.inner.count(c)
val hasData = params.inner.hasData(c.bits)
val raw_resp = c.bits.opcode === TLMessages.ProbeAck || c.bits.opcode === TLMessages.ProbeAckData
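    // Hold the ProbeAck/Release classification in a register so it persists across
    // cycles where C is not presenting a beat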
val resp = Mux(c.valid, raw_resp, RegEnable(raw_resp, c.valid))
// Handling of C is broken into two cases:
// ProbeAck
// if hasData, must be written to BankedStore
// if last beat, trigger resp
// Release
// if first beat, trigger req
// if hasData, go to putBuffer
// if hasData && first beat, must claim a list
assert (!(c.valid && c.bits.corrupt), "Data poisoning unavailable")
io.set := Mux(c.valid, set, RegEnable(set, c.valid)) // finds us the way
// Cut path from inner C to the BankedStore SRAM setup
// ... this makes it easier to layout the L2 data banks far away
val bs_adr = Wire(chiselTypeOf(io.bs_adr))
io.bs_adr <> Queue(bs_adr, 1, pipe=true)
io.bs_dat.data := RegEnable(c.bits.data, bs_adr.fire)
bs_adr.valid := resp && (!first || (c.valid && hasData))
bs_adr.bits.noop := !c.valid
bs_adr.bits.way := io.way
bs_adr.bits.set := io.set
bs_adr.bits.beat := Mux(c.valid, beat, RegEnable(beat + bs_adr.ready.asUInt, c.valid))
bs_adr.bits.mask := ~0.U(params.innerMaskBits.W)
params.ccover(bs_adr.valid && !bs_adr.ready, "SINKC_SRAM_STALL", "Data SRAM busy")
io.resp.valid := resp && c.valid && (first || last) && (!hasData || bs_adr.ready)
io.resp.bits.last := last
io.resp.bits.set := set
io.resp.bits.tag := tag
io.resp.bits.source := c.bits.source
io.resp.bits.param := c.bits.param
io.resp.bits.data := hasData
val putbuffer = Module(new ListBuffer(ListBufferParameters(new PutBufferCEntry(params), params.relLists, params.relBeats, false)))
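    // Bitmap of putbuffer lists currently claimed by in-flight ReleaseData bursts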
val lists = RegInit(0.U(params.relLists.W))
val lists_set = WireInit(init = 0.U(params.relLists.W))
val lists_clr = WireInit(init = 0.U(params.relLists.W))
lists := (lists | lists_set) & ~lists_clr
val free = !lists.andR
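    // One-hot pick of the lowest-numbered free putbuffer list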
val freeOH = ~(leftOR(~lists) << 1) & ~lists
val freeIdx = OHToUInt(freeOH)
val req_block = first && !io.req.ready
val buf_block = hasData && !putbuffer.io.push.ready
val set_block = hasData && first && !free
params.ccover(c.valid && !raw_resp && req_block, "SINKC_REQ_STALL", "No MSHR available to sink request")
params.ccover(c.valid && !raw_resp && buf_block, "SINKC_BUF_STALL", "No space in putbuffer for beat")
params.ccover(c.valid && !raw_resp && set_block, "SINKC_SET_STALL", "No space in putbuffer for request")
c.ready := Mux(raw_resp, !hasData || bs_adr.ready, !req_block && !buf_block && !set_block)
io.req.valid := !resp && c.valid && first && !buf_block && !set_block
putbuffer.io.push.valid := !resp && c.valid && hasData && !req_block && !set_block
when (!resp && c.valid && first && hasData && !req_block && !buf_block) { lists_set := freeOH }
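    // Claim the list index on the first beat and reuse it for the remaining beats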
val put = Mux(first, freeIdx, RegEnable(freeIdx, first))
io.req.bits.prio := VecInit(4.U(3.W).asBools)
io.req.bits.control:= false.B
io.req.bits.opcode := c.bits.opcode
io.req.bits.param := c.bits.param
io.req.bits.size := c.bits.size
io.req.bits.source := c.bits.source
io.req.bits.offset := offset
io.req.bits.set := set
io.req.bits.tag := tag
io.req.bits.put := put
putbuffer.io.push.bits.index := put
putbuffer.io.push.bits.data.data := c.bits.data
putbuffer.io.push.bits.data.corrupt := c.bits.corrupt
// Grant access to pop the data
putbuffer.io.pop.bits := io.rel_pop.bits.index
putbuffer.io.pop.valid := io.rel_pop.fire
io.rel_pop.ready := putbuffer.io.valid(io.rel_pop.bits.index(log2Ceil(params.relLists)-1,0))
io.rel_beat := putbuffer.io.data
when (io.rel_pop.fire && io.rel_pop.bits.last) {
lists_clr := UIntToOH(io.rel_pop.bits.index, params.relLists)
}
}
}
File Parameters.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property.cover
import scala.math.{min,max}
case class CacheParameters(
level: Int,
ways: Int,
sets: Int,
blockBytes: Int,
beatBytes: Int, // inner
hintsSkipProbe: Boolean)
{
require (ways > 0)
require (sets > 0)
require (blockBytes > 0 && isPow2(blockBytes))
require (beatBytes > 0 && isPow2(beatBytes))
require (blockBytes >= beatBytes)
val blocks = ways * sets
val sizeBytes = blocks * blockBytes
val blockBeats = blockBytes/beatBytes
}
case class InclusiveCachePortParameters(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e))
}
object InclusiveCachePortParameters
{
val none = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.none)
val full = InclusiveCachePortParameters(
a = BufferParams.default,
b = BufferParams.default,
c = BufferParams.default,
d = BufferParams.default,
e = BufferParams.default)
// This removes feed-through paths from C=>A and A=>C
val fullC = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.default,
d = BufferParams.none,
e = BufferParams.none)
val flowAD = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.flow,
e = BufferParams.none)
val flowAE = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.flow)
// For innerBuf:
// SinkA: no restrictions, flows into scheduler+putbuffer
// SourceB: no restrictions, flows out of scheduler
  // SinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore
// SourceD: no restrictions, flows out of bankedStore/regout
// SinkE: no restrictions, flows into scheduler
//
// ... so while none is possible, you probably want at least flowAC to cut ready
// from the scheduler delay and flowD to ease SourceD back-pressure
  // For outerBuffer:
// SourceA: must not be pipe, flows out of scheduler
// SinkB: no restrictions, flows into scheduler
// SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored
// SinkD: no restrictions, flows into scheduler & bankedStore
// SourceE: must not be pipe, flows out of scheduler
//
// ... AE take the channel ready into the scheduler, so you need at least flowAE
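
  // A hypothetical combination (not defined in this file, shown only as a sketch)
  // following the guidance above: flow buffers on the inner A, C and D channels.
  // val flowACD = InclusiveCachePortParameters(
  //   a = BufferParams.flow,
  //   b = BufferParams.none,
  //   c = BufferParams.flow,
  //   d = BufferParams.flow,
  //   e = BufferParams.none)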
}
case class InclusiveCacheMicroParameters(
writeBytes: Int, // backing store update granularity
memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz)
portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes
dirReg: Boolean = false,
innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none
outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE
{
require (writeBytes > 0 && isPow2(writeBytes))
require (memCycles > 0)
require (portFactor >= 2) // for inner RMW and concurrent outer Release + Grant
}
case class InclusiveCacheControlParameters(
address: BigInt,
beatBytes: Int,
bankedControl: Boolean)
case class InclusiveCacheParameters(
cache: CacheParameters,
micro: InclusiveCacheMicroParameters,
control: Boolean,
inner: TLEdgeIn,
outer: TLEdgeOut)(implicit val p: Parameters)
{
require (cache.ways > 1)
require (cache.sets > 1 && isPow2(cache.sets))
require (micro.writeBytes <= inner.manager.beatBytes)
require (micro.writeBytes <= outer.manager.beatBytes)
require (inner.manager.beatBytes <= cache.blockBytes)
require (outer.manager.beatBytes <= cache.blockBytes)
// Require that all cached address ranges have contiguous blocks
outer.manager.managers.flatMap(_.address).foreach { a =>
require (a.alignment >= cache.blockBytes)
}
// If we are the first level cache, we do not need to support inner-BCE
val firstLevel = !inner.client.clients.exists(_.supports.probe)
// If we are the last level cache, we do not need to support outer-B
val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED)
require (lastLevel)
// Provision enough resources to achieve full throughput with missing single-beat accesses
val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro)
val secondary = max(mshrs, micro.memCycles - mshrs)
val putLists = micro.memCycles // allow every request to be single beat
val putBeats = max(2*cache.blockBeats, micro.memCycles)
val relLists = 2
val relBeats = relLists*cache.blockBeats
val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address))
val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_))
def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] =
if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail)
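// Worked example (illustrative only): bitOffsets(0xB) scans the mask 0b1011 and collects
// the positions of its set bits, yielding List(0, 1, 3); addressMapping below is exactly
// this list computed for pickMask.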
val addressMapping = bitOffsets(pickMask)
val addressBits = addressMapping.size
// println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}")
val allClients = inner.client.clients.size
val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size
val clientBits = max(1, clientBitsRaw)
val stateBits = 2
val wayBits = log2Ceil(cache.ways)
val setBits = log2Ceil(cache.sets)
val offsetBits = log2Ceil(cache.blockBytes)
val tagBits = addressBits - setBits - offsetBits
val putBits = log2Ceil(max(putLists, relLists))
require (tagBits > 0)
require (offsetBits > 0)
val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1
val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1
val innerMaskBits = inner.manager.beatBytes / micro.writeBytes
val outerMaskBits = outer.manager.beatBytes / micro.writeBytes
def clientBit(source: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse)
}
}
def clientSource(bit: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U))
}
}
def parseAddress(x: UInt): (UInt, UInt, UInt) = {
val offset = Cat(addressMapping.map(o => x(o,o)).reverse)
val set = offset >> offsetBits
val tag = set >> setBits
(tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0))
}
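// Illustrative split (assumed geometry, not from this file): with blockBytes = 64 and
// sets = 1024, offsetBits = 6 and setBits = 10, so parseAddress returns
// (compacted >> 16, compacted(15, 6), compacted(5, 0)), where "compacted" is the Cat of
// the address bits selected by addressMapping.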
def widen(x: UInt, width: Int): UInt = {
val y = x | 0.U(width.W)
assert (y >> width === 0.U)
y(width-1, 0)
}
def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = {
val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits))
val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) }
addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) }
Cat(bits.reverse)
}
def restoreAddress(expanded: UInt): UInt = {
val missingBits = flatAddresses
.map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match
.groupBy(_._1)
.view
.mapValues(_.map(_._2))
val muxMask = AddressDecoder(missingBits.values.toList)
val mux = missingBits.toList.map { case (bits, addrs) =>
val widen = addrs.map(_.widen(~muxMask))
val matches = AddressSet
.unify(widen.distinct)
.map(_.contains(expanded))
.reduce(_ || _)
(matches, bits.U)
}
expanded | Mux1H(mux)
}
def dirReg[T <: Data](x: T, en: Bool = true.B): T = {
if (micro.dirReg) RegEnable(x, en) else x
}
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc)
}
object MetaData
{
val stateBits = 2
def INVALID: UInt = 0.U(stateBits.W) // way is empty
def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk
def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk
def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch
// Does a request need trunk?
def needT(opcode: UInt, param: UInt): Bool = {
!opcode(2) ||
(opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) ||
((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB)
}
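// Illustrative cases: Get and AcquireBlock(NtoB) do not need TRUNK; Put*/Atomics
// (opcode(2) === 0), Hint(PREFETCH_WRITE) and Acquire(NtoT/BtoT) do.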
// Does a request prove the client need not be probed?
def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = {
// Acquire(toB) and Get => is N, so no probe
// Acquire(*toT) => is N or B, but need T, so no probe
// Hint => could be anything, so a probe IS needed; but when hintsSkipProbe is enabled, the requesting client itself is not probed
// Put* => is N or B, so probe IS needed
opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm || opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B)
}
def isToN(param: UInt): Bool = {
param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN
}
def isToB(param: UInt): Bool = {
param === TLPermissions.TtoB || param === TLPermissions.BtoB
}
}
object InclusiveCacheParameters
{
val lfsrBits = 10
val L2ControlAddress = 0x2010000
val L2ControlSize = 0x1000
def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = {
// We need 2-3 normal MSHRs to cover the Directory latency
// To fully exploit the memory bandwidth-delay product, we need memCycles/blockBeats MSHRs
max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats)
}
def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int =
// We need a dedicated MSHR for B+C each
2 + out_mshrs(cache, micro)
}
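// Worked example (illustrative only): with blockBytes = 64, beatBytes = 8 (blockBeats = 8),
// memCycles = 40 and dirReg = false, out_mshrs = max(2, ceil(40 / 8)) = 5 and all_mshrs = 7.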
class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
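// Illustrative example (assumed beatBytes = 8, maxTransfer = 64): a 64-byte (size = 6)
// message carrying data gives numBeats1 = UIntToOH1(6, 6) >> 3 = 0x3f >> 3 = 7
// (i.e. 8 beats), while a data-less message gives 0.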
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
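// Illustrative timing (4-beat burst): across the four fire cycles (first, last) reads
// (1,0), (0,0), (0,0), (0,1), `done` pulses on the final beat, and `count` steps 0, 1, 2, 3.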
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
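// Hedged usage sketch (not part of this file): a client driving channel A might write
//   val (legal, abits) = edgeOut.AcquireBlock(sourceId, blockAddr, lgBlockSize, TLPermissions.NtoT)
//   tl.a.valid := wantAcquire && legal
//   tl.a.bits  := abits
// where sourceId, blockAddr, lgBlockSize, wantAcquire and tl are the caller's own signals.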
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module SinkC( // @[SinkC.scala:41:7]
input clock, // @[SinkC.scala:41:7]
input reset, // @[SinkC.scala:41:7]
input io_req_ready, // @[SinkC.scala:43:14]
output io_req_valid, // @[SinkC.scala:43:14]
output [2:0] io_req_bits_opcode, // @[SinkC.scala:43:14]
output [2:0] io_req_bits_param, // @[SinkC.scala:43:14]
output [2:0] io_req_bits_size, // @[SinkC.scala:43:14]
output [5:0] io_req_bits_source, // @[SinkC.scala:43:14]
output [12:0] io_req_bits_tag, // @[SinkC.scala:43:14]
output [5:0] io_req_bits_offset, // @[SinkC.scala:43:14]
output [5:0] io_req_bits_put, // @[SinkC.scala:43:14]
output [9:0] io_req_bits_set, // @[SinkC.scala:43:14]
output io_resp_valid, // @[SinkC.scala:43:14]
output io_resp_bits_last, // @[SinkC.scala:43:14]
output [9:0] io_resp_bits_set, // @[SinkC.scala:43:14]
output [12:0] io_resp_bits_tag, // @[SinkC.scala:43:14]
output [5:0] io_resp_bits_source, // @[SinkC.scala:43:14]
output [2:0] io_resp_bits_param, // @[SinkC.scala:43:14]
output io_resp_bits_data, // @[SinkC.scala:43:14]
output io_c_ready, // @[SinkC.scala:43:14]
input io_c_valid, // @[SinkC.scala:43:14]
input [2:0] io_c_bits_opcode, // @[SinkC.scala:43:14]
input [2:0] io_c_bits_param, // @[SinkC.scala:43:14]
input [2:0] io_c_bits_size, // @[SinkC.scala:43:14]
input [5:0] io_c_bits_source, // @[SinkC.scala:43:14]
input [31:0] io_c_bits_address, // @[SinkC.scala:43:14]
input [63:0] io_c_bits_data, // @[SinkC.scala:43:14]
input io_c_bits_corrupt, // @[SinkC.scala:43:14]
output [9:0] io_set, // @[SinkC.scala:43:14]
input [2:0] io_way, // @[SinkC.scala:43:14]
input io_bs_adr_ready, // @[SinkC.scala:43:14]
output io_bs_adr_valid, // @[SinkC.scala:43:14]
output io_bs_adr_bits_noop, // @[SinkC.scala:43:14]
output [2:0] io_bs_adr_bits_way, // @[SinkC.scala:43:14]
output [9:0] io_bs_adr_bits_set, // @[SinkC.scala:43:14]
output [2:0] io_bs_adr_bits_beat, // @[SinkC.scala:43:14]
output io_bs_adr_bits_mask, // @[SinkC.scala:43:14]
output [63:0] io_bs_dat_data, // @[SinkC.scala:43:14]
output io_rel_pop_ready, // @[SinkC.scala:43:14]
input io_rel_pop_valid, // @[SinkC.scala:43:14]
input [5:0] io_rel_pop_bits_index, // @[SinkC.scala:43:14]
input io_rel_pop_bits_last, // @[SinkC.scala:43:14]
output [63:0] io_rel_beat_data, // @[SinkC.scala:43:14]
output io_rel_beat_corrupt // @[SinkC.scala:43:14]
);
wire [9:0] io_set_0; // @[SinkC.scala:41:7]
wire _putbuffer_io_push_ready; // @[SinkC.scala:115:27]
wire [1:0] _putbuffer_io_valid; // @[SinkC.scala:115:27]
wire _c_q_io_deq_valid; // @[Decoupled.scala:362:21]
wire [2:0] _c_q_io_deq_bits_opcode; // @[Decoupled.scala:362:21]
wire [2:0] _c_q_io_deq_bits_param; // @[Decoupled.scala:362:21]
wire [2:0] _c_q_io_deq_bits_size; // @[Decoupled.scala:362:21]
wire [5:0] _c_q_io_deq_bits_source; // @[Decoupled.scala:362:21]
wire [31:0] _c_q_io_deq_bits_address; // @[Decoupled.scala:362:21]
wire [63:0] _c_q_io_deq_bits_data; // @[Decoupled.scala:362:21]
wire _c_q_io_deq_bits_corrupt; // @[Decoupled.scala:362:21]
wire io_req_ready_0 = io_req_ready; // @[SinkC.scala:41:7]
wire io_c_valid_0 = io_c_valid; // @[SinkC.scala:41:7]
wire [2:0] io_c_bits_opcode_0 = io_c_bits_opcode; // @[SinkC.scala:41:7]
wire [2:0] io_c_bits_param_0 = io_c_bits_param; // @[SinkC.scala:41:7]
wire [2:0] io_c_bits_size_0 = io_c_bits_size; // @[SinkC.scala:41:7]
wire [5:0] io_c_bits_source_0 = io_c_bits_source; // @[SinkC.scala:41:7]
wire [31:0] io_c_bits_address_0 = io_c_bits_address; // @[SinkC.scala:41:7]
wire [63:0] io_c_bits_data_0 = io_c_bits_data; // @[SinkC.scala:41:7]
wire io_c_bits_corrupt_0 = io_c_bits_corrupt; // @[SinkC.scala:41:7]
wire [2:0] io_way_0 = io_way; // @[SinkC.scala:41:7]
wire io_bs_adr_ready_0 = io_bs_adr_ready; // @[SinkC.scala:41:7]
wire io_rel_pop_valid_0 = io_rel_pop_valid; // @[SinkC.scala:41:7]
wire [5:0] io_rel_pop_bits_index_0 = io_rel_pop_bits_index; // @[SinkC.scala:41:7]
wire io_rel_pop_bits_last_0 = io_rel_pop_bits_last; // @[SinkC.scala:41:7]
wire io_req_bits_prio_0 = 1'h0; // @[SinkC.scala:41:7]
wire io_req_bits_prio_1 = 1'h0; // @[SinkC.scala:41:7]
wire io_req_bits_control = 1'h0; // @[SinkC.scala:41:7]
wire io_req_bits_prio_2 = 1'h1; // @[SinkC.scala:41:7]
wire bs_adr_bits_mask = 1'h1; // @[SinkC.scala:96:22]
wire _bs_adr_bits_mask_T = 1'h1; // @[SinkC.scala:104:25]
wire _io_req_valid_T_6; // @[SinkC.scala:136:61]
wire [12:0] tag_1; // @[Parameters.scala:217:9]
wire [5:0] offset_1; // @[Parameters.scala:217:50]
wire [9:0] set_1; // @[Parameters.scala:217:28]
wire _io_resp_valid_T_5; // @[SinkC.scala:107:57]
wire last; // @[Edges.scala:232:33]
wire hasData; // @[Edges.scala:102:36]
wire [9:0] _io_set_T; // @[SinkC.scala:92:18]
wire [9:0] bs_adr_bits_set = io_set_0; // @[SinkC.scala:41:7, :96:22]
wire [2:0] bs_adr_bits_way = io_way_0; // @[SinkC.scala:41:7, :96:22]
wire _io_rel_pop_ready_T_2; // @[SinkC.scala:160:43]
wire [2:0] io_req_bits_opcode_0; // @[SinkC.scala:41:7]
wire [2:0] io_req_bits_param_0; // @[SinkC.scala:41:7]
wire [2:0] io_req_bits_size_0; // @[SinkC.scala:41:7]
wire [5:0] io_req_bits_source_0; // @[SinkC.scala:41:7]
wire [12:0] io_req_bits_tag_0; // @[SinkC.scala:41:7]
wire [5:0] io_req_bits_offset_0; // @[SinkC.scala:41:7]
wire [5:0] io_req_bits_put_0; // @[SinkC.scala:41:7]
wire [9:0] io_req_bits_set_0; // @[SinkC.scala:41:7]
wire io_req_valid_0; // @[SinkC.scala:41:7]
wire io_resp_bits_last_0; // @[SinkC.scala:41:7]
wire [9:0] io_resp_bits_set_0; // @[SinkC.scala:41:7]
wire [12:0] io_resp_bits_tag_0; // @[SinkC.scala:41:7]
wire [5:0] io_resp_bits_source_0; // @[SinkC.scala:41:7]
wire [2:0] io_resp_bits_param_0; // @[SinkC.scala:41:7]
wire io_resp_bits_data_0; // @[SinkC.scala:41:7]
wire io_resp_valid_0; // @[SinkC.scala:41:7]
wire io_c_ready_0; // @[SinkC.scala:41:7]
wire io_bs_adr_bits_noop_0; // @[SinkC.scala:41:7]
wire [2:0] io_bs_adr_bits_way_0; // @[SinkC.scala:41:7]
wire [9:0] io_bs_adr_bits_set_0; // @[SinkC.scala:41:7]
wire [2:0] io_bs_adr_bits_beat_0; // @[SinkC.scala:41:7]
wire io_bs_adr_bits_mask_0; // @[SinkC.scala:41:7]
wire io_bs_adr_valid_0; // @[SinkC.scala:41:7]
wire [63:0] io_bs_dat_data_0; // @[SinkC.scala:41:7]
wire io_rel_pop_ready_0; // @[SinkC.scala:41:7]
wire [63:0] io_rel_beat_data_0; // @[SinkC.scala:41:7]
wire io_rel_beat_corrupt_0; // @[SinkC.scala:41:7]
wire _offset_T = _c_q_io_deq_bits_address[0]; // @[Decoupled.scala:362:21]
wire _offset_T_1 = _c_q_io_deq_bits_address[1]; // @[Decoupled.scala:362:21]
wire _offset_T_2 = _c_q_io_deq_bits_address[2]; // @[Decoupled.scala:362:21]
wire _offset_T_3 = _c_q_io_deq_bits_address[3]; // @[Decoupled.scala:362:21]
wire _offset_T_4 = _c_q_io_deq_bits_address[4]; // @[Decoupled.scala:362:21]
wire _offset_T_5 = _c_q_io_deq_bits_address[5]; // @[Decoupled.scala:362:21]
wire _offset_T_6 = _c_q_io_deq_bits_address[6]; // @[Decoupled.scala:362:21]
wire _offset_T_7 = _c_q_io_deq_bits_address[7]; // @[Decoupled.scala:362:21]
wire _offset_T_8 = _c_q_io_deq_bits_address[8]; // @[Decoupled.scala:362:21]
wire _offset_T_9 = _c_q_io_deq_bits_address[9]; // @[Decoupled.scala:362:21]
wire _offset_T_10 = _c_q_io_deq_bits_address[10]; // @[Decoupled.scala:362:21]
wire _offset_T_11 = _c_q_io_deq_bits_address[11]; // @[Decoupled.scala:362:21]
wire _offset_T_12 = _c_q_io_deq_bits_address[12]; // @[Decoupled.scala:362:21]
wire _offset_T_13 = _c_q_io_deq_bits_address[13]; // @[Decoupled.scala:362:21]
wire _offset_T_14 = _c_q_io_deq_bits_address[14]; // @[Decoupled.scala:362:21]
wire _offset_T_15 = _c_q_io_deq_bits_address[15]; // @[Decoupled.scala:362:21]
wire _offset_T_16 = _c_q_io_deq_bits_address[16]; // @[Decoupled.scala:362:21]
wire _offset_T_17 = _c_q_io_deq_bits_address[17]; // @[Decoupled.scala:362:21]
wire _offset_T_18 = _c_q_io_deq_bits_address[18]; // @[Decoupled.scala:362:21]
wire _offset_T_19 = _c_q_io_deq_bits_address[19]; // @[Decoupled.scala:362:21]
wire _offset_T_20 = _c_q_io_deq_bits_address[20]; // @[Decoupled.scala:362:21]
wire _offset_T_21 = _c_q_io_deq_bits_address[21]; // @[Decoupled.scala:362:21]
wire _offset_T_22 = _c_q_io_deq_bits_address[22]; // @[Decoupled.scala:362:21]
wire _offset_T_23 = _c_q_io_deq_bits_address[23]; // @[Decoupled.scala:362:21]
wire _offset_T_24 = _c_q_io_deq_bits_address[24]; // @[Decoupled.scala:362:21]
wire _offset_T_25 = _c_q_io_deq_bits_address[25]; // @[Decoupled.scala:362:21]
wire _offset_T_26 = _c_q_io_deq_bits_address[26]; // @[Decoupled.scala:362:21]
wire _offset_T_27 = _c_q_io_deq_bits_address[27]; // @[Decoupled.scala:362:21]
wire _offset_T_28 = _c_q_io_deq_bits_address[31]; // @[Decoupled.scala:362:21]
wire [1:0] offset_lo_lo_lo_hi = {_offset_T_2, _offset_T_1}; // @[Parameters.scala:214:{21,47}]
wire [2:0] offset_lo_lo_lo = {offset_lo_lo_lo_hi, _offset_T}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_lo_lo_hi_lo = {_offset_T_4, _offset_T_3}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_lo_lo_hi_hi = {_offset_T_6, _offset_T_5}; // @[Parameters.scala:214:{21,47}]
wire [3:0] offset_lo_lo_hi = {offset_lo_lo_hi_hi, offset_lo_lo_hi_lo}; // @[Parameters.scala:214:21]
wire [6:0] offset_lo_lo = {offset_lo_lo_hi, offset_lo_lo_lo}; // @[Parameters.scala:214:21]
wire [1:0] offset_lo_hi_lo_hi = {_offset_T_9, _offset_T_8}; // @[Parameters.scala:214:{21,47}]
wire [2:0] offset_lo_hi_lo = {offset_lo_hi_lo_hi, _offset_T_7}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_lo_hi_hi_lo = {_offset_T_11, _offset_T_10}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_lo_hi_hi_hi = {_offset_T_13, _offset_T_12}; // @[Parameters.scala:214:{21,47}]
wire [3:0] offset_lo_hi_hi = {offset_lo_hi_hi_hi, offset_lo_hi_hi_lo}; // @[Parameters.scala:214:21]
wire [6:0] offset_lo_hi = {offset_lo_hi_hi, offset_lo_hi_lo}; // @[Parameters.scala:214:21]
wire [13:0] offset_lo = {offset_lo_hi, offset_lo_lo}; // @[Parameters.scala:214:21]
wire [1:0] offset_hi_lo_lo_hi = {_offset_T_16, _offset_T_15}; // @[Parameters.scala:214:{21,47}]
wire [2:0] offset_hi_lo_lo = {offset_hi_lo_lo_hi, _offset_T_14}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_hi_lo_hi_lo = {_offset_T_18, _offset_T_17}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_hi_lo_hi_hi = {_offset_T_20, _offset_T_19}; // @[Parameters.scala:214:{21,47}]
wire [3:0] offset_hi_lo_hi = {offset_hi_lo_hi_hi, offset_hi_lo_hi_lo}; // @[Parameters.scala:214:21]
wire [6:0] offset_hi_lo = {offset_hi_lo_hi, offset_hi_lo_lo}; // @[Parameters.scala:214:21]
wire [1:0] offset_hi_hi_lo_lo = {_offset_T_22, _offset_T_21}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_hi_hi_lo_hi = {_offset_T_24, _offset_T_23}; // @[Parameters.scala:214:{21,47}]
wire [3:0] offset_hi_hi_lo = {offset_hi_hi_lo_hi, offset_hi_hi_lo_lo}; // @[Parameters.scala:214:21]
wire [1:0] offset_hi_hi_hi_lo = {_offset_T_26, _offset_T_25}; // @[Parameters.scala:214:{21,47}]
wire [1:0] offset_hi_hi_hi_hi = {_offset_T_28, _offset_T_27}; // @[Parameters.scala:214:{21,47}]
wire [3:0] offset_hi_hi_hi = {offset_hi_hi_hi_hi, offset_hi_hi_hi_lo}; // @[Parameters.scala:214:21]
wire [7:0] offset_hi_hi = {offset_hi_hi_hi, offset_hi_hi_lo}; // @[Parameters.scala:214:21]
wire [14:0] offset_hi = {offset_hi_hi, offset_hi_lo}; // @[Parameters.scala:214:21]
wire [28:0] offset = {offset_hi, offset_lo}; // @[Parameters.scala:214:21]
wire [22:0] set = offset[28:6]; // @[Parameters.scala:214:21, :215:22]
wire [12:0] tag = set[22:10]; // @[Parameters.scala:215:22, :216:19]
assign tag_1 = tag; // @[Parameters.scala:216:19, :217:9]
assign io_req_bits_tag_0 = tag_1; // @[SinkC.scala:41:7]
assign io_resp_bits_tag_0 = tag_1; // @[SinkC.scala:41:7]
assign set_1 = set[9:0]; // @[Parameters.scala:215:22, :217:28]
assign io_req_bits_set_0 = set_1; // @[SinkC.scala:41:7]
assign io_resp_bits_set_0 = set_1; // @[SinkC.scala:41:7]
assign offset_1 = offset[5:0]; // @[Parameters.scala:214:21, :217:50]
assign io_req_bits_offset_0 = offset_1; // @[SinkC.scala:41:7]
wire _q_io_deq_ready_T_7; // @[SinkC.scala:134:19]
wire _T = _q_io_deq_ready_T_7 & _c_q_io_deq_valid; // @[Decoupled.scala:51:35, :362:21]
wire [12:0] _r_beats1_decode_T = 13'h3F << _c_q_io_deq_bits_size; // @[Decoupled.scala:362:21]
wire [5:0] _r_beats1_decode_T_1 = _r_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _r_beats1_decode_T_2 = ~_r_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] r_beats1_decode = _r_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire r_beats1_opdata = _c_q_io_deq_bits_opcode[0]; // @[Decoupled.scala:362:21]
assign hasData = _c_q_io_deq_bits_opcode[0]; // @[Decoupled.scala:362:21]
wire [2:0] r_beats1 = r_beats1_opdata ? r_beats1_decode : 3'h0; // @[Edges.scala:102:36, :220:59, :221:14]
reg [2:0] r_counter; // @[Edges.scala:229:27]
wire [3:0] _r_counter1_T = {1'h0, r_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] r_counter1 = _r_counter1_T[2:0]; // @[Edges.scala:230:28]
wire first = r_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _r_last_T = r_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _r_last_T_1 = r_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
assign last = _r_last_T | _r_last_T_1; // @[Edges.scala:232:{25,33,43}]
assign io_resp_bits_last_0 = last; // @[Edges.scala:232:33]
wire r_3 = last & _T; // @[Decoupled.scala:51:35]
wire [2:0] _r_count_T = ~r_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] beat = r_beats1 & _r_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _r_counter_T = first ? r_beats1 : r_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
assign io_resp_bits_data_0 = hasData; // @[Edges.scala:102:36]
wire _raw_resp_T = _c_q_io_deq_bits_opcode == 3'h4; // @[Decoupled.scala:362:21]
wire _raw_resp_T_1 = _c_q_io_deq_bits_opcode == 3'h5; // @[Decoupled.scala:362:21]
wire raw_resp = _raw_resp_T | _raw_resp_T_1; // @[SinkC.scala:78:{34,58,75}]
reg resp_r; // @[SinkC.scala:79:48]
wire resp = _c_q_io_deq_valid ? raw_resp : resp_r; // @[Decoupled.scala:362:21] |
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
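// Hedged usage sketch (not part of this file): in a diplomacy graph these helpers are
// used as nodes, e.g.
//   deviceNode := TLBuffer(BufferParams.flow) := xbarNode
//   val chained = TLBuffer.chainNode(2, Some("mem_path"))
// where deviceNode and xbarNode stand in for whatever TLNodes the surrounding design provides.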
File Fragmenter.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, BufferParams, IdRange, TransferSizes}
import freechips.rocketchip.util.{Repeater, OH1ToUInt, UIntToOH1}
import scala.math.min
import freechips.rocketchip.util.DataToAugmentedData
object EarlyAck {
sealed trait T
case object AllPuts extends T
case object PutFulls extends T
case object None extends T
}
// minSize: minimum size of transfers supported by all outward managers
// maxSize: maximum size of transfers supported after the Fragmenter is applied
// alwaysMin: fragment all requests down to minSize (else fragment to maximum supported by manager)
// earlyAck: should a multibeat Put be acknowledged on the first beat or the last beat
// holdFirstDeny: allow the Fragmenter to unsafely combine multibeat Gets by taking the first denied for the whole burst
// nameSuffix: appends a suffix to the module name
// Fragmenter modifies: PutFull, PutPartial, LogicalData, Get, Hint
// Fragmenter passes: ArithmeticData (truncated to minSize if alwaysMin)
// Fragmenter cannot modify acquire (could livelock); thus it is unsafe to put caches on both sides
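// Hedged usage sketch (not part of this file): a typical instantiation fragments wide
// bursts down to a device-friendly size, e.g.
//   deviceNode := LazyModule(new TLFragmenter(minSize = 8, maxSize = 64)).node := xbarNode
// where deviceNode and xbarNode are assumptions standing in for the surrounding design.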
class TLFragmenter(val minSize: Int, val maxSize: Int, val alwaysMin: Boolean = false, val earlyAck: EarlyAck.T = EarlyAck.None, val holdFirstDeny: Boolean = false, val nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule
{
require(isPow2 (maxSize), s"TLFragmenter expects pow2(maxSize), but got $maxSize")
require(isPow2 (minSize), s"TLFragmenter expects pow2(minSize), but got $minSize")
require(minSize <= maxSize, s"TLFragmenter expects min <= max, but got $minSize > $maxSize")
val fragmentBits = log2Ceil(maxSize / minSize)
val fullBits = if (earlyAck == EarlyAck.PutFulls) 1 else 0
val toggleBits = 1
val addedBits = fragmentBits + toggleBits + fullBits
def expandTransfer(x: TransferSizes, op: String) = if (!x) x else {
// validate that we can apply the fragmenter correctly
require (x.max >= minSize, s"TLFragmenter (with parent $parent) max transfer size $op(${x.max}) must be >= min transfer size (${minSize})")
TransferSizes(x.min, maxSize)
}
private def noChangeRequired = minSize == maxSize
private def shrinkTransfer(x: TransferSizes) =
if (!alwaysMin) x
else if (x.min <= minSize) TransferSizes(x.min, min(minSize, x.max))
else TransferSizes.none
private def mapManager(m: TLSlaveParameters) = m.v1copy(
supportsArithmetic = shrinkTransfer(m.supportsArithmetic),
supportsLogical = shrinkTransfer(m.supportsLogical),
supportsGet = expandTransfer(m.supportsGet, "Get"),
supportsPutFull = expandTransfer(m.supportsPutFull, "PutFull"),
supportsPutPartial = expandTransfer(m.supportsPutPartial, "PutPartial"),
supportsHint = expandTransfer(m.supportsHint, "Hint"))
val node = new TLAdapterNode(
// We require that all the responses are mutually FIFO
// Thus we need to compact all of the masters into one big master
clientFn = { c => (if (noChangeRequired) c else c.v2copy(
masters = Seq(TLMasterParameters.v2(
name = "TLFragmenter",
sourceId = IdRange(0, if (minSize == maxSize) c.endSourceId else (c.endSourceId << addedBits)),
requestFifo = true,
emits = TLMasterToSlaveTransferSizes(
acquireT = shrinkTransfer(c.masters.map(_.emits.acquireT) .reduce(_ mincover _)),
acquireB = shrinkTransfer(c.masters.map(_.emits.acquireB) .reduce(_ mincover _)),
arithmetic = shrinkTransfer(c.masters.map(_.emits.arithmetic).reduce(_ mincover _)),
logical = shrinkTransfer(c.masters.map(_.emits.logical) .reduce(_ mincover _)),
get = shrinkTransfer(c.masters.map(_.emits.get) .reduce(_ mincover _)),
putFull = shrinkTransfer(c.masters.map(_.emits.putFull) .reduce(_ mincover _)),
putPartial = shrinkTransfer(c.masters.map(_.emits.putPartial).reduce(_ mincover _)),
hint = shrinkTransfer(c.masters.map(_.emits.hint) .reduce(_ mincover _))
)
))
))},
managerFn = { m => if (noChangeRequired) m else m.v2copy(slaves = m.slaves.map(mapManager)) }
) {
override def circuitIdentity = noChangeRequired
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = (Seq("TLFragmenter") ++ nameSuffix).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
if (noChangeRequired) {
out <> in
} else {
// All managers must share a common FIFO domain (responses might end up interleaved)
val manager = edgeOut.manager
val managers = manager.managers
val beatBytes = manager.beatBytes
val fifoId = managers(0).fifoId
require (fifoId.isDefined && managers.map(_.fifoId == fifoId).reduce(_ && _))
require (!manager.anySupportAcquireB || !edgeOut.client.anySupportProbe,
s"TLFragmenter (with parent $parent) can't fragment a caching client's requests into a cacheable region")
require (minSize >= beatBytes, s"TLFragmenter (with parent $parent) can't support fragmenting ($minSize) to sub-beat ($beatBytes) accesses")
// We can't support devices which are cached on both sides of us
require (!edgeOut.manager.anySupportAcquireB || !edgeIn.client.anySupportProbe)
// We can't support denied because we reassemble fragments
require (!edgeOut.manager.mayDenyGet || holdFirstDeny, s"TLFragmenter (with parent $parent) can't support denials without holdFirstDeny=true")
require (!edgeOut.manager.mayDenyPut || earlyAck == EarlyAck.None)
/* The Fragmenter is a bit tricky, because there are 5 sizes in play:
* max size -- the maximum transfer size possible
* orig size -- the original pre-fragmenter size
* frag size -- the modified post-fragmenter size
* min size -- the threshold below which frag=orig
* beat size -- the amount transferred on any given beat
*
* The relationships are as follows:
* max >= orig >= frag
* max > min >= beat
* It IS possible that orig <= min (then frag=orig; ie: no fragmentation)
*
* The fragment# (sent via TL.source) is measured in multiples of min size.
* Meanwhile, to track the progress, counters measure in multiples of beat size.
*
* Here is an example of a bus with max=256, min=8, beat=4 and a device supporting 16.
*
* in.A out.A (frag#) out.D (frag#) in.D gen# ack#
* get64 get16 6 ackD16 6 ackD64 12 15
* ackD16 6 ackD64 14
* ackD16 6 ackD64 13
* ackD16 6 ackD64 12
* get16 4 ackD16 4 ackD64 8 11
* ackD16 4 ackD64 10
* ackD16 4 ackD64 9
* ackD16 4 ackD64 8
* get16 2 ackD16 2 ackD64 4 7
* ackD16 2 ackD64 6
* ackD16 2 ackD64 5
* ackD16 2 ackD64 4
* get16 0 ackD16 0 ackD64 0 3
* ackD16 0 ackD64 2
* ackD16 0 ackD64 1
* ackD16 0 ackD64 0
*
* get8 get8 0 ackD8 0 ackD8 0 1
* ackD8 0 ackD8 0
*
* get4 get4 0 ackD4 0 ackD4 0 0
* get1 get1 0 ackD1 0 ackD1 0 0
*
* put64 put16 6 15
* put64 put16 6 14
* put64 put16 6 13
* put64 put16 6 ack16 6 12 12
* put64 put16 4 11
* put64 put16 4 10
* put64 put16 4 9
* put64 put16 4 ack16 4 8 8
* put64 put16 2 7
* put64 put16 2 6
* put64 put16 2 5
* put64 put16 2 ack16 2 4 4
* put64 put16 0 3
* put64 put16 0 2
* put64 put16 0 1
* put64 put16 0 ack16 0 ack64 0 0
*
* put8 put8 0 1
* put8 put8 0 ack8 0 ack8 0 0
*
* put4 put4 0 ack4 0 ack4 0 0
* put1 put1 0 ack1 0 ack1 0 0
*/
val counterBits = log2Up(maxSize/beatBytes)
val maxDownSize = if (alwaysMin) minSize else min(manager.maxTransfer, maxSize)
// Consider the following waveform for two 4-beat bursts:
// ---A----A------------
// -------D-----DDD-DDDD
// Under TL rules, the second A can use the same source as the first A,
// because the source is released for reuse on the first response beat.
//
// However, if we fragment the requests, it looks like this:
// ---3210-3210---------
// -------3-----210-3210
* ... now we've broken the rules, because fragments 2, 1, and 0 are in flight twice.
//
// This phenomenon means we can have essentially 2*maxSize/minSize-1
// fragmented transactions in flight per original transaction source.
//
// To keep the source unique, we encode the beat counter in the low
// bits of the source. To solve the overlap, we use a toggle bit.
// Whatever toggle bit the D is reassembling, A will use the opposite.
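// For illustration (derived from out.a.bits.source below): the expanded source is
// Cat(original source, [putfull bit, only for EarlyAck.PutFulls], toggle, frag#).
// With minSize=8 and maxSize=64 (fragmentBits=3, no full bit), original source 0x2
// sending fragment 6 on the toggle=1 side becomes (0x2 << 4) | (1 << 3) | 6 = 0x2E;
// the D path strips the low addedBits again to recover 0x2.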
// First, handle the return path
val acknum = RegInit(0.U(counterBits.W))
val dOrig = Reg(UInt())
val dToggle = RegInit(false.B)
val dFragnum = out.d.bits.source(fragmentBits-1, 0)
val dFirst = acknum === 0.U
val dLast = dFragnum === 0.U // only for AccessAck (!Data)
val dsizeOH = UIntToOH (out.d.bits.size, log2Ceil(maxDownSize)+1)
val dsizeOH1 = UIntToOH1(out.d.bits.size, log2Up(maxDownSize))
val dHasData = edgeOut.hasData(out.d.bits)
// calculate new acknum
val acknum_fragment = dFragnum << log2Ceil(minSize/beatBytes)
val acknum_size = dsizeOH1 >> log2Ceil(beatBytes)
assert (!out.d.valid || (acknum_fragment & acknum_size) === 0.U)
val dFirst_acknum = acknum_fragment | Mux(dHasData, acknum_size, 0.U)
val ack_decrement = Mux(dHasData, 1.U, dsizeOH >> log2Ceil(beatBytes))
// calculate the original size
val dFirst_size = OH1ToUInt((dFragnum << log2Ceil(minSize)) | dsizeOH1)
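// Worked example (the get64 row of the table above, min=8, beat=4, frag size 16):
// dFragnum=6 and dsizeOH1=0xF give acknum_fragment = 6<<1 = 12, acknum_size = 0xF>>2 = 3,
// so dFirst_acknum = 12|3 = 15 (the ack# column) and
// dFirst_size = OH1ToUInt((6<<3) | 0xF) = OH1ToUInt(0x3F) = 6, i.e. the original 64 bytes.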
when (out.d.fire) {
acknum := Mux(dFirst, dFirst_acknum, acknum - ack_decrement)
when (dFirst) {
dOrig := dFirst_size
dToggle := out.d.bits.source(fragmentBits)
}
}
// Swallow up non-data ack fragments
val doEarlyAck = earlyAck match {
case EarlyAck.AllPuts => true.B
case EarlyAck.PutFulls => out.d.bits.source(fragmentBits+1)
case EarlyAck.None => false.B
}
val drop = !dHasData && !Mux(doEarlyAck, dFirst, dLast)
out.d.ready := in.d.ready || drop
in.d.valid := out.d.valid && !drop
in.d.bits := out.d.bits // pass most stuff unchanged
in.d.bits.source := out.d.bits.source >> addedBits
in.d.bits.size := Mux(dFirst, dFirst_size, dOrig)
if (edgeOut.manager.mayDenyPut) {
val r_denied = Reg(Bool())
val d_denied = (!dFirst && r_denied) || out.d.bits.denied
when (out.d.fire) { r_denied := d_denied }
in.d.bits.denied := d_denied
}
if (edgeOut.manager.mayDenyGet) {
// Take denied only from the first beat and hold that value
val d_denied = out.d.bits.denied holdUnless dFirst
when (dHasData) {
in.d.bits.denied := d_denied
in.d.bits.corrupt := d_denied || out.d.bits.corrupt
}
}
// What maximum transfer sizes do downstream devices support?
val maxArithmetics = managers.map(_.supportsArithmetic.max)
val maxLogicals = managers.map(_.supportsLogical.max)
val maxGets = managers.map(_.supportsGet.max)
val maxPutFulls = managers.map(_.supportsPutFull.max)
val maxPutPartials = managers.map(_.supportsPutPartial.max)
val maxHints = managers.map(m => if (m.supportsHint) maxDownSize else 0)
// We assume that the request is valid => size 0 is impossible
val lgMinSize = log2Ceil(minSize).U
val maxLgArithmetics = maxArithmetics.map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgLogicals = maxLogicals .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgGets = maxGets .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgPutFulls = maxPutFulls .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgPutPartials = maxPutPartials.map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgHints = maxHints .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
// Make the request repeatable
val repeater = Module(new Repeater(in.a.bits))
repeater.io.enq <> in.a
val in_a = repeater.io.deq
// If this is in front of a single manager, these become constants
val find = manager.findFast(edgeIn.address(in_a.bits))
val maxLgArithmetic = Mux1H(find, maxLgArithmetics)
val maxLgLogical = Mux1H(find, maxLgLogicals)
val maxLgGet = Mux1H(find, maxLgGets)
val maxLgPutFull = Mux1H(find, maxLgPutFulls)
val maxLgPutPartial = Mux1H(find, maxLgPutPartials)
val maxLgHint = Mux1H(find, maxLgHints)
val limit = if (alwaysMin) lgMinSize else
MuxLookup(in_a.bits.opcode, lgMinSize)(Array(
TLMessages.PutFullData -> maxLgPutFull,
TLMessages.PutPartialData -> maxLgPutPartial,
TLMessages.ArithmeticData -> maxLgArithmetic,
TLMessages.LogicalData -> maxLgLogical,
TLMessages.Get -> maxLgGet,
TLMessages.Hint -> maxLgHint))
val aOrig = in_a.bits.size
val aFrag = Mux(aOrig > limit, limit, aOrig)
val aOrigOH1 = UIntToOH1(aOrig, log2Ceil(maxSize))
val aFragOH1 = UIntToOH1(aFrag, log2Up(maxDownSize))
val aHasData = edgeIn.hasData(in_a.bits)
val aMask = Mux(aHasData, 0.U, aFragOH1)
val gennum = RegInit(0.U(counterBits.W))
val aFirst = gennum === 0.U
val old_gennum1 = Mux(aFirst, aOrigOH1 >> log2Ceil(beatBytes), gennum - 1.U)
val new_gennum = ~(~old_gennum1 | (aMask >> log2Ceil(beatBytes))) // ~(~x|y) is width safe
val aFragnum = ~(~(old_gennum1 >> log2Ceil(minSize/beatBytes)) | (aFragOH1 >> log2Ceil(minSize)))
val aLast = aFragnum === 0.U
val aToggle = !Mux(aFirst, dToggle, RegEnable(dToggle, aFirst))
val aFull = if (earlyAck == EarlyAck.PutFulls) Some(in_a.bits.opcode === TLMessages.PutFullData) else None
when (out.a.fire) { gennum := new_gennum }
repeater.io.repeat := !aHasData && aFragnum =/= 0.U
out.a <> in_a
out.a.bits.address := in_a.bits.address | ~(old_gennum1 << log2Ceil(beatBytes) | ~aOrigOH1 | aFragOH1 | (minSize-1).U)
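// Effectively, the OR'd term above supplies the byte offset of the current fragment within
// the original burst: the complement of the down-counting beat counter fills the address
// bits between the fragment size and the original size, so fragment addresses walk upward
// while aFragnum counts down (e.g. a 64-byte Get fragmented into 16-byte pieces issues
// base+0, +16, +32, +48 as aFragnum steps 6, 4, 2, 0, matching the table above).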
out.a.bits.source := Cat(Seq(in_a.bits.source) ++ aFull ++ Seq(aToggle.asUInt, aFragnum))
out.a.bits.size := aFrag
// Optimize away some of the Repeater's registers
assert (!repeater.io.full || !aHasData)
out.a.bits.data := in.a.bits.data
val fullMask = ((BigInt(1) << beatBytes) - 1).U
assert (!repeater.io.full || in_a.bits.mask === fullMask)
out.a.bits.mask := Mux(repeater.io.full, fullMask, in.a.bits.mask)
out.a.bits.user.waiveAll :<= in.a.bits.user.subset(_.isData)
// Tie off unused channels
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLFragmenter
{
def apply(minSize: Int, maxSize: Int, alwaysMin: Boolean = false, earlyAck: EarlyAck.T = EarlyAck.None, holdFirstDeny: Boolean = false, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode =
{
if (minSize <= maxSize) {
val fragmenter = LazyModule(new TLFragmenter(minSize, maxSize, alwaysMin, earlyAck, holdFirstDeny, nameSuffix))
fragmenter.node
} else { TLEphemeralNode()(ValName("no_fragmenter")) }
}
def apply(wrapper: TLBusWrapper, nameSuffix: Option[String])(implicit p: Parameters): TLNode = apply(wrapper.beatBytes, wrapper.blockBytes, nameSuffix = nameSuffix)
def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper, None)
}
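// Typical usage (a sketch, echoing connectTLSlave in BaseTile.scala and the test below):
// a fragmenter sits directly in front of a narrower device so that block-sized bus
// transactions are broken into transfers the device supports, e.g.
//   deviceNode := TLFragmenter(deviceBeatBytes, blockBytes, earlyAck = EarlyAck.PutFulls) := busNode
// where deviceNode and busNode are placeholder names for this example.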
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMFragmenter(ramBeatBytes: Int, maxSize: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("Fragmenter"))
val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff), beatBytes = ramBeatBytes))
(ram.node
:= TLDelayer(0.1)
:= TLBuffer(BufferParams.flow)
:= TLDelayer(0.1)
:= TLFragmenter(ramBeatBytes, maxSize, earlyAck = EarlyAck.AllPuts)
:= TLDelayer(0.1)
:= TLBuffer(BufferParams.flow)
:= TLFragmenter(ramBeatBytes, maxSize/2)
:= TLDelayer(0.1)
:= TLBuffer(BufferParams.flow)
:= model.node
:= fuzz.node)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMFragmenterTest(ramBeatBytes: Int, maxSize: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMFragmenter(ramBeatBytes,maxSize,txns)).module)
io.finished := dut.io.finished
dut.io.start := io.start
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
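// e.g. 5.U.addWrap(4.U, 7) yields 2.U and 2.U.subWrap(4.U, 7) yields 5.U
// (both operands are assumed to already be < n)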
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
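// e.g. UIntToOH1(3.U, 8) = 0b00000111 (a mask of the 3 low bits), OH1ToOH(0b0111.U) = 0b1000,
// and OH1ToUInt recovers 3 from that mask; TLFragmenter uses these to convert between
// transfer sizes and byte-offset masks.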
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
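// e.g. leftOR("b00100".U) = "b11100".U and rightOR("b00100".U) = "b00111".U;
// the optional cap bounds how far the fill is allowed to propagate.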
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
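// e.g. groupByIntoSeq(Seq(3, 1, 4, 1, 5))(_ % 2) returns
// Seq(1 -> Seq(3, 1, 1, 5), 0 -> Seq(4)), with keys in first-appearance order,
// unlike Seq.groupBy whose Map iteration order is unspecified.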
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File BaseTile.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tile
import chisel3._
import chisel3.util.{log2Ceil, log2Up}
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.bundlebridge._
import freechips.rocketchip.resources.{PropertyMap, PropertyOption, ResourceReference, DTSTimebase}
import freechips.rocketchip.interrupts.{IntInwardNode, IntOutwardNode}
import freechips.rocketchip.rocket.{ICacheParams, DCacheParams, BTBParams, ASIdBits, VMIdBits, TraceAux, BPWatch}
import freechips.rocketchip.subsystem.{
HierarchicalElementParams, InstantiableHierarchicalElementParams, HierarchicalElementCrossingParamsLike,
CacheBlockBytes, SystemBusKey, BaseHierarchicalElement, InsertTimingClosureRegistersOnHartIds, BaseHierarchicalElementModuleImp
}
import freechips.rocketchip.tilelink.{TLEphemeralNode, TLOutwardNode, TLNode, TLFragmenter, EarlyAck, TLWidthWidget, TLManagerParameters, ManagerUnification}
import freechips.rocketchip.prci.{ClockCrossingType, ClockSinkParameters}
import freechips.rocketchip.util.{TraceCoreParams, TraceCoreInterface}
import freechips.rocketchip.resources.{BigIntToProperty, IntToProperty, StringToProperty}
import freechips.rocketchip.util.BooleanToAugmentedBoolean
case object TileVisibilityNodeKey extends Field[TLEphemeralNode]
case object TileKey extends Field[TileParams]
case object LookupByHartId extends Field[LookupByHartIdImpl]
trait TileParams extends HierarchicalElementParams {
val core: CoreParams
val icache: Option[ICacheParams]
val dcache: Option[DCacheParams]
val btb: Option[BTBParams]
val tileId: Int // may not be hartid
val blockerCtrlAddr: Option[BigInt]
}
abstract class InstantiableTileParams[TileType <: BaseTile]
extends InstantiableHierarchicalElementParams[TileType]
with TileParams {
def instantiate(crossing: HierarchicalElementCrossingParamsLike, lookup: LookupByHartIdImpl)
(implicit p: Parameters): TileType
}
/** These parameter values are not computed based on diplomacy negotiation
* and so are safe to use while diplomacy itself is running.
*/
trait HasNonDiplomaticTileParameters {
implicit val p: Parameters
def tileParams: TileParams = p(TileKey)
def usingVM: Boolean = tileParams.core.useVM
def usingUser: Boolean = tileParams.core.useUser || usingSupervisor
def usingSupervisor: Boolean = tileParams.core.hasSupervisorMode
def usingHypervisor: Boolean = usingVM && tileParams.core.useHypervisor
def usingDebug: Boolean = tileParams.core.useDebug
def usingRoCC: Boolean = !p(BuildRoCC).isEmpty
def usingBTB: Boolean = tileParams.btb.isDefined && tileParams.btb.get.nEntries > 0
def usingPTW: Boolean = usingVM
def usingDataScratchpad: Boolean = tileParams.dcache.flatMap(_.scratch).isDefined
def xLen: Int = tileParams.core.xLen
def xBytes: Int = xLen / 8
def iLen: Int = 32
def pgIdxBits: Int = 12
def pgLevelBits: Int = 10 - log2Ceil(xLen / 32)
def pgLevels: Int = tileParams.core.pgLevels
def maxSVAddrBits: Int = pgIdxBits + pgLevels * pgLevelBits
def maxHypervisorExtraAddrBits: Int = 2
def hypervisorExtraAddrBits: Int = {
if (usingHypervisor) maxHypervisorExtraAddrBits
else 0
}
def maxHVAddrBits: Int = maxSVAddrBits + hypervisorExtraAddrBits
def minPgLevels: Int = {
val res = xLen match { case 32 => 2; case 64 => 3 }
require(pgLevels >= res)
res
}
def asIdBits: Int = p(ASIdBits)
def vmIdBits: Int = p(VMIdBits)
lazy val maxPAddrBits: Int = {
require(xLen == 32 || xLen == 64, s"Only XLENs of 32 or 64 are supported, but got $xLen")
xLen match { case 32 => 34; case 64 => 56 }
}
def tileId: Int = tileParams.tileId
def cacheBlockBytes = p(CacheBlockBytes)
def lgCacheBlockBytes = log2Up(cacheBlockBytes)
def masterPortBeatBytes = p(SystemBusKey).beatBytes
// TODO make HellaCacheIO diplomatic and remove this brittle collection of hacks
// Core PTW DTIM coprocessors
def dcacheArbPorts = 1 + usingVM.toInt + usingDataScratchpad.toInt + p(BuildRoCC).size + (tileParams.core.useVector && tileParams.core.vectorUseDCache).toInt
// TODO merge with isaString in CSR.scala
def isaDTS: String = {
val ie = if (tileParams.core.useRVE) "e" else "i"
val m = if (tileParams.core.mulDiv.nonEmpty) "m" else ""
val a = if (tileParams.core.useAtomics) "a" else ""
val f = if (tileParams.core.fpu.nonEmpty) "f" else ""
val d = if (tileParams.core.fpu.nonEmpty && tileParams.core.fpu.get.fLen > 32) "d" else ""
val c = if (tileParams.core.useCompressed) "c" else ""
val b = if (tileParams.core.useBitmanip) "b" else ""
val v = if (tileParams.core.useVector && tileParams.core.vLen >= 128 && tileParams.core.eLen == 64 && tileParams.core.vfLen == 64) "v" else ""
val h = if (usingHypervisor) "h" else ""
val ext_strs = Seq(
(tileParams.core.useVector) -> s"zvl${tileParams.core.vLen}b",
(tileParams.core.useVector) -> {
val c = tileParams.core.vfLen match {
case 64 => "d"
case 32 => "f"
case 0 => "x"
}
s"zve${tileParams.core.eLen}$c"
},
(tileParams.core.useVector && tileParams.core.vfh) -> "zvfh",
(tileParams.core.fpu.map(_.fLen >= 16).getOrElse(false) && tileParams.core.minFLen <= 16) -> "zfh",
(tileParams.core.useZba) -> "zba",
(tileParams.core.useZbb) -> "zbb",
(tileParams.core.useZbs) -> "zbs",
(tileParams.core.useConditionalZero) -> "zicond"
).filter(_._1).map(_._2)
val multiLetterExt = (
// rdcycle[h], rdinstret[h] is implemented
// rdtime[h] is not implemented, and could be provided by software emulation
// see https://github.com/chipsalliance/rocket-chip/issues/3207
//Some(Seq("zicntr")) ++
Some(Seq("zicsr", "zifencei", "zihpm")) ++
Some(ext_strs) ++ Some(tileParams.core.vExts) ++
tileParams.core.customIsaExt.map(Seq(_))
).flatten
val multiLetterString = multiLetterExt.mkString("_")
s"rv$xLen$ie$m$a$f$d$c$b$v$h$multiLetterString"
}
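// e.g. a 64-bit core with M, A, F, D and C enabled (and no vector or hypervisor support)
// yields "rv64imafdc" followed by the multi-letter extensions joined with '_'
// (zicsr, zifencei, zihpm, plus any custom extensions).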
def tileProperties: PropertyMap = {
val dcache = tileParams.dcache.filter(!_.scratch.isDefined).map(d => Map(
"d-cache-block-size" -> cacheBlockBytes.asProperty,
"d-cache-sets" -> d.nSets.asProperty,
"d-cache-size" -> (d.nSets * d.nWays * cacheBlockBytes).asProperty)
).getOrElse(Nil)
val incoherent = if (!tileParams.core.useAtomicsOnlyForIO) Nil else Map(
"sifive,d-cache-incoherent" -> Nil)
val icache = tileParams.icache.map(i => Map(
"i-cache-block-size" -> cacheBlockBytes.asProperty,
"i-cache-sets" -> i.nSets.asProperty,
"i-cache-size" -> (i.nSets * i.nWays * cacheBlockBytes).asProperty)
).getOrElse(Nil)
val dtlb = tileParams.dcache.filter(_ => tileParams.core.useVM).map(d => Map(
"d-tlb-size" -> (d.nTLBWays * d.nTLBSets).asProperty,
"d-tlb-sets" -> d.nTLBSets.asProperty)).getOrElse(Nil)
val itlb = tileParams.icache.filter(_ => tileParams.core.useVM).map(i => Map(
"i-tlb-size" -> (i.nTLBWays * i.nTLBSets).asProperty,
"i-tlb-sets" -> i.nTLBSets.asProperty)).getOrElse(Nil)
val mmu =
if (tileParams.core.useVM) {
if (tileParams.core.useHypervisor) {
Map("tlb-split" -> Nil, "mmu-type" -> s"riscv,sv${maxSVAddrBits},sv${maxSVAddrBits}x4".asProperty)
} else {
Map("tlb-split" -> Nil, "mmu-type" -> s"riscv,sv$maxSVAddrBits".asProperty)
}
} else {
Nil
}
val pmp = if (tileParams.core.nPMPs > 0) Map(
"riscv,pmpregions" -> tileParams.core.nPMPs.asProperty,
"riscv,pmpgranularity" -> tileParams.core.pmpGranularity.asProperty) else Nil
dcache ++ icache ++ dtlb ++ itlb ++ mmu ++ pmp ++ incoherent
}
}
/** These parameter values are computed based on diplomacy negotiations
* and so are NOT safe to use while diplomacy itself is running.
* Only mix this trait into LazyModuleImps, Modules, Bundles, Data, etc.
*/
trait HasTileParameters extends HasNonDiplomaticTileParameters {
protected def tlBundleParams = p(TileVisibilityNodeKey).edges.out.head.bundle
lazy val paddrBits: Int = {
val bits = tlBundleParams.addressBits
require(bits <= maxPAddrBits, s"Requested $bits paddr bits, but since xLen is $xLen only $maxPAddrBits will fit")
bits
}
def vaddrBits: Int =
if (usingVM) {
val v = maxHVAddrBits
require(v == xLen || xLen > v && v > paddrBits)
v
} else {
// since virtual addresses sign-extend but physical addresses
// zero-extend, make room for a zero sign bit for physical addresses
(paddrBits + 1) min xLen
}
def vpnBits: Int = vaddrBits - pgIdxBits
def ppnBits: Int = paddrBits - pgIdxBits
def vpnBitsExtended: Int = vpnBits + (if (vaddrBits < xLen) 1 + usingHypervisor.toInt else 0)
def vaddrBitsExtended: Int = vpnBitsExtended + pgIdxBits
}
/** Base class for all Tiles that use TileLink */
abstract class BaseTile private (crossing: ClockCrossingType, q: Parameters)
extends BaseHierarchicalElement(crossing)(q)
with HasNonDiplomaticTileParameters
{
// Public constructor alters Parameters to supply some legacy compatibility keys
def this(tileParams: TileParams, crossing: ClockCrossingType, lookup: LookupByHartIdImpl, p: Parameters) = {
this(crossing, p.alterMap(Map(
TileKey -> tileParams,
TileVisibilityNodeKey -> TLEphemeralNode()(ValName("tile_master")),
LookupByHartId -> lookup
)))
}
def intInwardNode: IntInwardNode // Interrupts to the core from external devices
def intOutwardNode: Option[IntOutwardNode] // Interrupts from tile-internal devices (e.g. BEU)
def haltNode: IntOutwardNode // Unrecoverable error has occurred; suggest reset
def ceaseNode: IntOutwardNode // Tile has ceased to retire instructions
def wfiNode: IntOutwardNode // Tile is waiting for an interrupt
def module: BaseTileModuleImp[BaseTile]
/** Node for broadcasting a hart id to diplomatic consumers within the tile. */
val hartIdNexusNode: BundleBridgeNode[UInt] = BundleBroadcast[UInt](registered = p(InsertTimingClosureRegistersOnHartIds))
/** Node for consuming the hart id input in tile-layer Chisel logic. */
val hartIdSinkNode = BundleBridgeSink[UInt]()
/** Node for driving a hart id input, which is to be broadcast to units within the tile.
*
* Making this id value an IO and then using it to do lookups of information
* that would make otherwise-homogeneous tiles heterogeneous is a useful trick
* to enable deduplication of tiles for hierarchical P&R flows.
*/
val hartIdNode: BundleBridgeInwardNode[UInt] =
hartIdSinkNode := hartIdNexusNode := BundleBridgeNameNode("hartid")
/** Node for broadcasting a reset vector to diplomatic consumers within the tile. */
val resetVectorNexusNode: BundleBridgeNode[UInt] = BundleBroadcast[UInt]()
/** Node for consuming the reset vector input in tile-layer Chisel logic.
*
* Its width is sized by looking at the size of the address space visible
* on the tile's master ports, but this lookup is not evaluated until
* diplomacy has completed and Chisel elaboration has begun.
*/
val resetVectorSinkNode = BundleBridgeSink[UInt](Some(() => UInt(visiblePhysAddrBits.W)))
/** Node for supplying a reset vector that processors in this tile might begin fetching instructions from as they come out of reset. */
val resetVectorNode: BundleBridgeInwardNode[UInt] =
resetVectorSinkNode := resetVectorNexusNode := BundleBridgeNameNode("reset_vector")
/** Nodes for connecting NMI interrupt sources and vectors into the tile */
val nmiSinkNode = Option.when(tileParams.core.useNMI) {
BundleBridgeSink[NMI](Some(() => new NMI(visiblePhysAddrBits)))
}
val nmiNode: Option[BundleBridgeInwardNode[NMI]] = nmiSinkNode.map(_ := BundleBridgeNameNode("nmi"))
/** Node for broadcasting an address prefix to diplomatic consumers within the tile.
*
* The prefix should be applied by consumers by or-ing outputs of this node
* with a static base address (which is looked up based on the driven hartid value).
*/
val mmioAddressPrefixNexusNode = BundleBridgeNexus[UInt](
inputFn = BundleBridgeNexus.orReduction[UInt](registered = false) _,
outputFn = BundleBridgeNexus.fillN[UInt](registered = false) _,
default = Some(() => 0.U(1.W))
)
/** Node for external drivers to prefix base addresses of MMIO devices to which the core has a direct access path. */
val mmioAddressPrefixNode: BundleBridgeInwardNode[UInt] =
mmioAddressPrefixNexusNode :=* BundleBridgeNameNode("mmio_address_prefix")
// TODO: Any node marked "consumed by the core" or "driven by the core"
// should be moved to either be: a member of a specific BaseTile subclass,
// or actually just a member of the core's LazyModule itself,
// assuming the core itself is diplomatic.
// Then these nodes should just become IdentityNodes of their respective type
protected def traceRetireWidth = tileParams.core.retireWidth
/** Node for the core to drive legacy "raw" instruction trace. */
val traceSourceNode = BundleBridgeSource(() => new TraceBundle)
/** Node for external consumers to source a legacy instruction trace from the core. */
val traceNode = traceSourceNode
def traceCoreParams = new TraceCoreParams()
/** Node for core to drive instruction trace conforming to RISC-V Processor Trace spec V1.0 */
val traceCoreSourceNode = BundleBridgeSource(() => new TraceCoreInterface(traceCoreParams))
/** Node for external consumers to source a V1.0 instruction trace from the core. */
val traceCoreNode = traceCoreSourceNode
/** Node to broadcast collected trace sideband signals into the tile. */
val traceAuxNexusNode = BundleBridgeNexus[TraceAux](default = Some(() => {
val aux = Wire(new TraceAux)
aux.stall := false.B
aux.enable := false.B
aux
}))
/** Trace sideband signals to be consumed by the core. */
val traceAuxSinkNode = BundleBridgeSink[TraceAux]()
/** Trace sideband signals collected here to be driven into the tile. */
val traceAuxNode: BundleBridgeInwardNode[TraceAux] =
traceAuxSinkNode := traceAuxNexusNode :=* BundleBridgeNameNode("trace_aux")
/** Node for watchpoints to control trace driven by core. */
val bpwatchSourceNode = BundleBridgeSource(() => Vec(tileParams.core.nBreakpoints, new BPWatch(traceRetireWidth)))
/** Node to broadcast watchpoints to control trace. */
val bpwatchNexusNode = BundleBroadcast[Vec[BPWatch]]()
/** Node for external consumers to source watchpoints to control trace. */
val bpwatchNode: BundleBridgeOutwardNode[Vec[BPWatch]] =
BundleBridgeNameNode("bpwatch") :*= bpwatchNexusNode := bpwatchSourceNode
/** Helper function for connecting MMIO devices inside the tile to an xbar that will make them visible to external masters. */
def connectTLSlave(xbarNode: TLOutwardNode, node: TLNode, bytes: Int): Unit = {
DisableMonitors { implicit p =>
(Seq(node, TLFragmenter(bytes, cacheBlockBytes, earlyAck=EarlyAck.PutFulls))
++ (xBytes != bytes).option(TLWidthWidget(xBytes)))
.foldRight(xbarNode)(_ :*= _)
}
}
def connectTLSlave(node: TLNode, bytes: Int): Unit = { connectTLSlave(tlSlaveXbar.node, node, bytes) }
/** TileLink node which represents the view that the intra-tile masters have of the rest of the system. */
val visibilityNode = p(TileVisibilityNodeKey)
protected def visibleManagers = visibilityNode.edges.out.flatMap(_.manager.managers)
protected def visiblePhysAddrBits = visibilityNode.edges.out.head.bundle.addressBits
def unifyManagers: List[TLManagerParameters] = ManagerUnification(visibleManagers)
/** Finds resource labels for all the outward caches. */
def nextLevelCacheProperty: PropertyOption = {
val outer = visibleManagers
.filter(_.supportsAcquireB)
.flatMap(_.resources.headOption)
.map(_.owner.label)
.distinct
if (outer.isEmpty) None
else Some("next-level-cache" -> outer.map(l => ResourceReference(l)).toList)
}
/** Create a DTS representation of this "cpu". */
def cpuProperties: PropertyMap = Map(
"device_type" -> "cpu".asProperty,
"status" -> "okay".asProperty,
"clock-frequency" -> tileParams.core.bootFreqHz.asProperty,
"riscv,isa" -> isaDTS.asProperty,
"timebase-frequency" -> p(DTSTimebase).asProperty,
"hardware-exec-breakpoint-count" -> tileParams.core.nBreakpoints.asProperty
)
/** Can be used to access derived params calculated by HasCoreParameters
*
* However, callers must ensure they do not access a diplomatically-determined parameter
* before the graph in question has been fully connected.
*/
protected lazy val lazyCoreParamsView: HasCoreParameters = {
class C(implicit val p: Parameters) extends HasCoreParameters
new C
}
this.suggestName(tileParams.baseName)
}
abstract class BaseTileModuleImp[+L <: BaseTile](outer: L) extends BaseHierarchicalElementModuleImp[L](outer)
File HellaCache.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.rocket
import chisel3.{dontTouch, _}
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.bundlebridge._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.amba.AMBAProtField
import freechips.rocketchip.diplomacy.{IdRange, TransferSizes, RegionType}
import freechips.rocketchip.tile.{L1CacheParams, HasL1CacheParameters, HasCoreParameters, CoreBundle, HasNonDiplomaticTileParameters, BaseTile, HasTileParameters}
import freechips.rocketchip.tilelink.{TLMasterParameters, TLClientNode, TLMasterPortParameters, TLEdgeOut, TLWidthWidget, TLFIFOFixer, ClientMetadata}
import freechips.rocketchip.util.{Code, RandomReplacement, ParameterizedBundle}
import freechips.rocketchip.util.{BooleanToAugmentedBoolean, IntToAugmentedInt}
import scala.collection.mutable.ListBuffer
case class DCacheParams(
nSets: Int = 64,
nWays: Int = 4,
rowBits: Int = 64,
subWordBits: Option[Int] = None,
replacementPolicy: String = "random",
nTLBSets: Int = 1,
nTLBWays: Int = 32,
nTLBBasePageSectors: Int = 4,
nTLBSuperpages: Int = 4,
tagECC: Option[String] = None,
dataECC: Option[String] = None,
dataECCBytes: Int = 1,
nMSHRs: Int = 1,
nSDQ: Int = 17,
nRPQ: Int = 16,
nMMIOs: Int = 1,
blockBytes: Int = 64,
separateUncachedResp: Boolean = false,
acquireBeforeRelease: Boolean = false,
pipelineWayMux: Boolean = false,
clockGate: Boolean = false,
scratch: Option[BigInt] = None) extends L1CacheParams {
def tagCode: Code = Code.fromString(tagECC)
def dataCode: Code = Code.fromString(dataECC)
def dataScratchpadBytes: Int = scratch.map(_ => nSets*blockBytes).getOrElse(0)
def replacement = new RandomReplacement(nWays)
def silentDrop: Boolean = !acquireBeforeRelease
require((!scratch.isDefined || nWays == 1),
"Scratchpad only allowed in direct-mapped cache.")
require((!scratch.isDefined || nMSHRs == 0),
"Scratchpad only allowed in blocking cache.")
if (scratch.isEmpty)
require(isPow2(nSets), s"nSets($nSets) must be pow2")
}
trait HasL1HellaCacheParameters extends HasL1CacheParameters with HasCoreParameters {
val cacheParams = tileParams.dcache.get
val cfg = cacheParams
def wordBits = coreDataBits
def wordBytes = coreDataBytes
def subWordBits = cacheParams.subWordBits.getOrElse(wordBits)
def subWordBytes = subWordBits / 8
def wordOffBits = log2Up(wordBytes)
def beatBytes = cacheBlockBytes / cacheDataBeats
def beatWords = beatBytes / wordBytes
def beatOffBits = log2Up(beatBytes)
def idxMSB = untagBits-1
def idxLSB = blockOffBits
def offsetmsb = idxLSB-1
def offsetlsb = wordOffBits
def rowWords = rowBits/wordBits
def doNarrowRead = coreDataBits * nWays % rowBits == 0
def eccBytes = cacheParams.dataECCBytes
val eccBits = cacheParams.dataECCBytes * 8
val encBits = cacheParams.dataCode.width(eccBits)
val encWordBits = encBits * (wordBits / eccBits)
def encDataBits = cacheParams.dataCode.width(coreDataBits) // NBDCache only
def encRowBits = encDataBits*rowWords
def lrscCycles = coreParams.lrscCycles // ISA requires 16-insn LRSC sequences to succeed
def lrscBackoff = 3 // disallow LRSC reacquisition briefly
def blockProbeAfterGrantCycles = 8 // give the processor some time to issue a request after a grant
def nIOMSHRs = cacheParams.nMMIOs
def maxUncachedInFlight = cacheParams.nMMIOs
def dataScratchpadSize = cacheParams.dataScratchpadBytes
require(rowBits >= coreDataBits, s"rowBits($rowBits) < coreDataBits($coreDataBits)")
if (!usingDataScratchpad)
require(rowBits == cacheDataBits, s"rowBits($rowBits) != cacheDataBits($cacheDataBits)")
// would need offset addr for puts if data width < xlen
require(xLen <= cacheDataBits, s"xLen($xLen) > cacheDataBits($cacheDataBits)")
}
abstract class L1HellaCacheModule(implicit val p: Parameters) extends Module
with HasL1HellaCacheParameters
abstract class L1HellaCacheBundle(implicit val p: Parameters) extends ParameterizedBundle()(p)
with HasL1HellaCacheParameters
/** Bundle definitions for HellaCache interfaces */
trait HasCoreMemOp extends HasL1HellaCacheParameters {
val addr = UInt(coreMaxAddrBits.W)
val idx = (usingVM && untagBits > pgIdxBits).option(UInt(coreMaxAddrBits.W))
val tag = UInt((coreParams.dcacheReqTagBits + log2Ceil(dcacheArbPorts)).W)
val cmd = UInt(M_SZ.W)
val size = UInt(log2Ceil(coreDataBytes.log2 + 1).W)
val signed = Bool()
val dprv = UInt(PRV.SZ.W)
val dv = Bool()
}
trait HasCoreData extends HasCoreParameters {
val data = UInt(coreDataBits.W)
val mask = UInt(coreDataBytes.W)
}
class HellaCacheReqInternal(implicit p: Parameters) extends CoreBundle()(p) with HasCoreMemOp {
val phys = Bool()
val no_resp = Bool() // The dcache may omit generating a response for this request
val no_alloc = Bool()
val no_xcpt = Bool()
}
class HellaCacheReq(implicit p: Parameters) extends HellaCacheReqInternal()(p) with HasCoreData
class HellaCacheResp(implicit p: Parameters) extends CoreBundle()(p)
with HasCoreMemOp
with HasCoreData {
val replay = Bool()
val has_data = Bool()
val data_word_bypass = UInt(coreDataBits.W)
val data_raw = UInt(coreDataBits.W)
val store_data = UInt(coreDataBits.W)
}
class AlignmentExceptions extends Bundle {
val ld = Bool()
val st = Bool()
}
class HellaCacheExceptions extends Bundle {
val ma = new AlignmentExceptions
val pf = new AlignmentExceptions
val gf = new AlignmentExceptions
val ae = new AlignmentExceptions
}
class HellaCacheWriteData(implicit p: Parameters) extends CoreBundle()(p) with HasCoreData
class HellaCachePerfEvents extends Bundle {
val acquire = Bool()
val release = Bool()
val grant = Bool()
val tlbMiss = Bool()
val blocked = Bool()
val canAcceptStoreThenLoad = Bool()
val canAcceptStoreThenRMW = Bool()
val canAcceptLoadThenLoad = Bool()
val storeBufferEmptyAfterLoad = Bool()
val storeBufferEmptyAfterStore = Bool()
}
// interface between D$ and processor/DTLB
class HellaCacheIO(implicit p: Parameters) extends CoreBundle()(p) {
val req = Decoupled(new HellaCacheReq)
val s1_kill = Output(Bool()) // kill previous cycle's req
val s1_data = Output(new HellaCacheWriteData()) // data for previous cycle's req
val s2_nack = Input(Bool()) // req from two cycles ago is rejected
val s2_nack_cause_raw = Input(Bool()) // reason for nack is store-load RAW hazard (performance hint)
val s2_kill = Output(Bool()) // kill req from two cycles ago
val s2_uncached = Input(Bool()) // advisory signal that the access is MMIO
val s2_paddr = Input(UInt(paddrBits.W)) // translated address
val resp = Flipped(Valid(new HellaCacheResp))
val replay_next = Input(Bool())
val s2_xcpt = Input(new HellaCacheExceptions)
val s2_gpa = Input(UInt(vaddrBitsExtended.W))
val s2_gpa_is_pte = Input(Bool())
val uncached_resp = tileParams.dcache.get.separateUncachedResp.option(Flipped(Decoupled(new HellaCacheResp)))
val ordered = Input(Bool())
val store_pending = Input(Bool()) // there is a store in a store buffer somewhere
val perf = Input(new HellaCachePerfEvents())
val keep_clock_enabled = Output(Bool()) // should D$ avoid clock-gating itself?
val clock_enabled = Input(Bool()) // is D$ currently being clocked?
}
/** Base classes for Diplomatic TL2 HellaCaches */
abstract class HellaCache(tileId: Int)(implicit p: Parameters) extends LazyModule
with HasNonDiplomaticTileParameters {
protected val cfg = tileParams.dcache.get
protected def cacheClientParameters = cfg.scratch.map(x => Seq()).getOrElse(Seq(TLMasterParameters.v1(
name = s"Core ${tileId} DCache",
sourceId = IdRange(0, 1 max cfg.nMSHRs),
supportsProbe = TransferSizes(cfg.blockBytes, cfg.blockBytes))))
protected def mmioClientParameters = Seq(TLMasterParameters.v1(
name = s"Core ${tileId} DCache MMIO",
sourceId = IdRange(firstMMIO, firstMMIO + cfg.nMMIOs),
requestFifo = true))
def firstMMIO = (cacheClientParameters.map(_.sourceId.end) :+ 0).max
val node = TLClientNode(Seq(TLMasterPortParameters.v1(
clients = cacheClientParameters ++ mmioClientParameters,
minLatency = 1,
requestFields = tileParams.core.useVM.option(Seq()).getOrElse(Seq(AMBAProtField())))))
val hartIdSinkNodeOpt = cfg.scratch.map(_ => BundleBridgeSink[UInt]())
val mmioAddressPrefixSinkNodeOpt = cfg.scratch.map(_ => BundleBridgeSink[UInt]())
val module: HellaCacheModule
def flushOnFenceI = cfg.scratch.isEmpty && !node.edges.out(0).manager.managers.forall(m => !m.supportsAcquireB || !m.executable || m.regionType >= RegionType.TRACKED || m.regionType <= RegionType.IDEMPOTENT)
def canSupportCFlushLine = !usingVM || cfg.blockBytes * cfg.nSets <= (1 << pgIdxBits)
require(!tileParams.core.haveCFlush || cfg.scratch.isEmpty, "CFLUSH_D_L1 instruction requires a D$")
}
class HellaCacheBundle(implicit p: Parameters) extends CoreBundle()(p) {
val cpu = Flipped(new HellaCacheIO)
val ptw = new TLBPTWIO()
val errors = new DCacheErrors
val tlb_port = new DCacheTLBPort
}
class HellaCacheModule(outer: HellaCache) extends LazyModuleImp(outer)
with HasL1HellaCacheParameters {
implicit val edge: TLEdgeOut = outer.node.edges.out(0)
val (tl_out, _) = outer.node.out(0)
val io = IO(new HellaCacheBundle)
val io_hartid = outer.hartIdSinkNodeOpt.map(_.bundle)
val io_mmio_address_prefix = outer.mmioAddressPrefixSinkNodeOpt.map(_.bundle)
dontTouch(io.cpu.resp) // Users like to monitor these fields even if the core ignores some signals
dontTouch(io.cpu.s1_data)
require(rowBits == edge.bundle.dataBits)
private val fifoManagers = edge.manager.managers.filter(TLFIFOFixer.allVolatile)
fifoManagers.foreach { m =>
require (m.fifoId == fifoManagers.head.fifoId,
s"IOMSHRs must be FIFO for all regions with effects, but HellaCache sees\n"+
s"${m.nodePath.map(_.name)}\nversus\n${fifoManagers.head.nodePath.map(_.name)}")
}
}
/** Support overriding which HellaCache is instantiated */
case object BuildHellaCache extends Field[BaseTile => Parameters => HellaCache](HellaCacheFactory.apply)
object HellaCacheFactory {
def apply(tile: BaseTile)(p: Parameters): HellaCache = {
if (tile.tileParams.dcache.get.nMSHRs == 0)
new DCache(tile.tileId, tile.crossing)(p)
else
new NonBlockingDCache(tile.tileId)(p)
}
}
/** Mix-ins for constructing tiles that have a HellaCache */
trait HasHellaCache { this: BaseTile =>
val module: HasHellaCacheModule
implicit val p: Parameters
var nDCachePorts = 0
lazy val dcache: HellaCache = LazyModule(p(BuildHellaCache)(this)(p))
tlMasterXbar.node := TLWidthWidget(tileParams.dcache.get.rowBits/8) := dcache.node
dcache.hartIdSinkNodeOpt.map { _ := hartIdNexusNode }
dcache.mmioAddressPrefixSinkNodeOpt.map { _ := mmioAddressPrefixNexusNode }
InModuleBody {
dcache.module.io.tlb_port := DontCare
}
}
trait HasHellaCacheModule {
val outer: HasHellaCache with HasTileParameters
implicit val p: Parameters
val dcachePorts = ListBuffer[HellaCacheIO]()
val dcacheArb = Module(new HellaCacheArbiter(outer.nDCachePorts)(outer.p))
outer.dcache.module.io.cpu <> dcacheArb.io.mem
}
/** Metadata array used for all HellaCaches */
class L1Metadata(implicit p: Parameters) extends L1HellaCacheBundle()(p) {
val coh = new ClientMetadata
val tag = UInt(tagBits.W)
}
object L1Metadata {
def apply(tag: Bits, coh: ClientMetadata)(implicit p: Parameters) = {
val meta = Wire(new L1Metadata)
meta.tag := tag
meta.coh := coh
meta
}
}
class L1MetaReadReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) {
val idx = UInt(idxBits.W)
val way_en = UInt(nWays.W)
val tag = UInt(tagBits.W)
}
class L1MetaWriteReq(implicit p: Parameters) extends L1MetaReadReq()(p) {
val data = new L1Metadata
}
class L1MetadataArray[T <: L1Metadata](onReset: () => T)(implicit p: Parameters) extends L1HellaCacheModule()(p) {
val rstVal = onReset()
val io = IO(new Bundle {
val read = Flipped(Decoupled(new L1MetaReadReq))
val write = Flipped(Decoupled(new L1MetaWriteReq))
val resp = Output(Vec(nWays, rstVal.cloneType))
})
val rst_cnt = RegInit(0.U(log2Up(nSets+1).W))
val rst = rst_cnt < nSets.U
val waddr = Mux(rst, rst_cnt, io.write.bits.idx)
val wdata = Mux(rst, rstVal, io.write.bits.data).asUInt
val wmask = Mux(rst || (nWays == 1).B, (-1).S, io.write.bits.way_en.asSInt).asBools
val rmask = Mux(rst || (nWays == 1).B, (-1).S, io.read.bits.way_en.asSInt).asBools
when (rst) { rst_cnt := rst_cnt+1.U }
val metabits = rstVal.getWidth
val tag_array = SyncReadMem(nSets, Vec(nWays, UInt(metabits.W)))
val wen = rst || io.write.valid
when (wen) {
tag_array.write(waddr, VecInit.fill(nWays)(wdata), wmask)
}
io.resp := tag_array.read(io.read.bits.idx, io.read.fire).map(_.asTypeOf(chiselTypeOf(rstVal)))
io.read.ready := !wen // so really this could be a 6T RAM
io.write.ready := !rst
}
File Interrupts.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tile
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.resources.{Device, DeviceSnippet, Description, ResourceBinding, ResourceInt}
import freechips.rocketchip.interrupts.{IntIdentityNode, IntSinkNode, IntSinkPortSimple, IntSourceNode, IntSourcePortSimple}
import freechips.rocketchip.util.CanHaveErrors
import freechips.rocketchip.resources.{IntToProperty, StringToProperty}
import freechips.rocketchip.util.BooleanToAugmentedBoolean
class NMI(val w: Int) extends Bundle {
val rnmi = Bool()
val rnmi_interrupt_vector = UInt(w.W)
val rnmi_exception_vector = UInt(w.W)
}
class TileInterrupts(implicit p: Parameters) extends CoreBundle()(p) {
val debug = Bool()
val mtip = Bool()
val msip = Bool()
val meip = Bool()
val seip = usingSupervisor.option(Bool())
val lip = Vec(coreParams.nLocalInterrupts, Bool())
val nmi = usingNMI.option(new NMI(resetVectorLen))
}
// Use diplomatic interrupts to route external interrupts from the subsystem into the tile
trait SinksExternalInterrupts { this: BaseTile =>
val intInwardNode = intXbar.intnode :=* IntIdentityNode()(ValName("int_local"))
protected val intSinkNode = IntSinkNode(IntSinkPortSimple())
intSinkNode := intXbar.intnode
def cpuDevice: Device
val intcDevice = new DeviceSnippet {
override def parent = Some(cpuDevice)
def describe(): Description = {
Description("interrupt-controller", Map(
"compatible" -> "riscv,cpu-intc".asProperty,
"interrupt-controller" -> Nil,
"#interrupt-cells" -> 1.asProperty))
}
}
ResourceBinding {
intSinkNode.edges.in.flatMap(_.source.sources).map { case s =>
for (i <- s.range.start until s.range.end) {
csrIntMap.lift(i).foreach { j =>
s.resources.foreach { r =>
r.bind(intcDevice, ResourceInt(j))
}
}
}
}
}
// TODO: the order of the following two functions must match, and
// also match the order in which things are connected to the
// per-tile crossbar in subsystem.HasTiles.connectInterrupts
// debug, msip, mtip, meip, seip, lip offsets in CSRs
def csrIntMap: List[Int] = {
val nlips = tileParams.core.nLocalInterrupts
val seip = if (usingSupervisor) Seq(9) else Nil
List(65535, 3, 7, 11) ++ seip ++ List.tabulate(nlips)(_ + 16)
}
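// e.g. with supervisor mode and two local interrupts this is
// List(65535, 3, 7, 11, 9, 16, 17): debug, msip, mtip, meip, seip, then the lip offsets.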
// go from flat diplomatic Interrupts to bundled TileInterrupts
def decodeCoreInterrupts(core: TileInterrupts): Unit = {
val async_ips = Seq(core.debug)
val periph_ips = Seq(
core.msip,
core.mtip,
core.meip)
val seip = if (core.seip.isDefined) Seq(core.seip.get) else Nil
val core_ips = core.lip
val (interrupts, _) = intSinkNode.in(0)
(async_ips ++ periph_ips ++ seip ++ core_ips).zip(interrupts).foreach { case(c, i) => c := i }
}
}
trait SourcesExternalNotifications { this: BaseTile =>
// Report unrecoverable error conditions
val haltNode = IntSourceNode(IntSourcePortSimple())
def reportHalt(could_halt: Option[Bool]): Unit = {
val (halt_and_catch_fire, _) = haltNode.out(0)
halt_and_catch_fire(0) := could_halt.map(RegEnable(true.B, false.B, _)).getOrElse(false.B)
}
def reportHalt(errors: Seq[CanHaveErrors]): Unit = {
reportHalt(errors.flatMap(_.uncorrectable).map(_.valid).reduceOption(_||_))
}
// Report when the tile has ceased to retire instructions
val ceaseNode = IntSourceNode(IntSourcePortSimple())
def reportCease(could_cease: Option[Bool], quiescenceCycles: Int = 8): Unit = {
def waitForQuiescence(cease: Bool): Bool = {
// don't report cease until signal is stable for longer than any pipeline depth
val count = RegInit(0.U(log2Ceil(quiescenceCycles + 1).W))
val saturated = count >= quiescenceCycles.U
when (!cease) { count := 0.U }
when (cease && !saturated) { count := count + 1.U }
saturated
}
val (cease, _) = ceaseNode.out(0)
cease(0) := could_cease.map{ c =>
val cease = (waitForQuiescence(c))
// Test-Only Code --
val prev_cease = RegNext(cease, false.B)
assert(!(prev_cease & !cease), "CEASE line can not glitch once raised")
cease
}.getOrElse(false.B)
}
// Report when the tile is waiting for an interrupt
val wfiNode = IntSourceNode(IntSourcePortSimple())
def reportWFI(could_wfi: Option[Bool]): Unit = {
val (wfi, _) = wfiNode.out(0)
wfi(0) := could_wfi.map(RegNext(_, init=false.B)).getOrElse(false.B)
}
}
File WidthWidget.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.AddressSet
import freechips.rocketchip.util.{Repeater, UIntToOH1}
// innerBeatBytes => the new client-facing bus width
class TLWidthWidget(innerBeatBytes: Int)(implicit p: Parameters) extends LazyModule
{
private def noChangeRequired(manager: TLManagerPortParameters) = manager.beatBytes == innerBeatBytes
val node = new TLAdapterNode(
clientFn = { case c => c },
managerFn = { case m => m.v1copy(beatBytes = innerBeatBytes) }){
override def circuitIdentity = edges.out.map(_.manager).forall(noChangeRequired)
}
override lazy val desiredName = s"TLWidthWidget$innerBeatBytes"
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def merge[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T]) = {
val inBytes = edgeIn.manager.beatBytes
val outBytes = edgeOut.manager.beatBytes
val ratio = outBytes / inBytes
val keepBits = log2Ceil(outBytes)
val dropBits = log2Ceil(inBytes)
val countBits = log2Ceil(ratio)
val size = edgeIn.size(in.bits)
val hasData = edgeIn.hasData(in.bits)
val limit = UIntToOH1(size, keepBits) >> dropBits
val count = RegInit(0.U(countBits.W))
val first = count === 0.U
val last = count === limit || !hasData
val enable = Seq.tabulate(ratio) { i => !((count ^ i.U) & limit).orR }
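// For example (a sketch): merging a 4-byte master into a 16-byte slave gives ratio=4; a
// 16-byte burst has limit=0b11, so enable(i) is true only when count==i and each input
// beat is steered into its own lane of the wide output word, while a 4-byte transfer has
// limit=0, every enable is true, and the single beat is replicated across the lanes.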
val corrupt_reg = RegInit(false.B)
val corrupt_in = edgeIn.corrupt(in.bits)
val corrupt_out = corrupt_in || corrupt_reg
when (in.fire) {
count := count + 1.U
corrupt_reg := corrupt_out
when (last) {
count := 0.U
corrupt_reg := false.B
}
}
def helper(idata: UInt): UInt = {
// rdata is X until the first time a multi-beat write occurs.
// Prevent the X from leaking outside by jamming the mux control until
// the first time rdata is written (and hence no longer X).
val rdata_written_once = RegInit(false.B)
val masked_enable = enable.map(_ || !rdata_written_once)
val odata = Seq.fill(ratio) { WireInit(idata) }
val rdata = Reg(Vec(ratio-1, chiselTypeOf(idata)))
val pdata = rdata :+ idata
val mdata = (masked_enable zip (odata zip pdata)) map { case (e, (o, p)) => Mux(e, o, p) }
when (in.fire && !last) {
rdata_written_once := true.B
(rdata zip mdata) foreach { case (r, m) => r := m }
}
Cat(mdata.reverse)
}
in.ready := out.ready || !last
out.valid := in.valid && last
out.bits := in.bits
// Don't put down hardware if we never carry data
edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits)))
edgeOut.corrupt(out.bits) := corrupt_out
(out.bits, in.bits) match {
case (o: TLBundleA, i: TLBundleA) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W))
case (o: TLBundleB, i: TLBundleB) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W))
case (o: TLBundleC, i: TLBundleC) => ()
case (o: TLBundleD, i: TLBundleD) => ()
case _ => require(false, "Impossible bundle combination in WidthWidget")
}
}
def split[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = {
val inBytes = edgeIn.manager.beatBytes
val outBytes = edgeOut.manager.beatBytes
val ratio = inBytes / outBytes
val keepBits = log2Ceil(inBytes)
val dropBits = log2Ceil(outBytes)
val countBits = log2Ceil(ratio)
val size = edgeIn.size(in.bits)
val hasData = edgeIn.hasData(in.bits)
val limit = UIntToOH1(size, keepBits) >> dropBits
val count = RegInit(0.U(countBits.W))
val first = count === 0.U
val last = count === limit || !hasData
when (out.fire) {
count := count + 1.U
when (last) { count := 0.U }
}
// For sub-beat transfer, extract which part matters
val sel = in.bits match {
case a: TLBundleA => a.address(keepBits-1, dropBits)
case b: TLBundleB => b.address(keepBits-1, dropBits)
case c: TLBundleC => c.address(keepBits-1, dropBits)
case d: TLBundleD => {
val sel = sourceMap(d.source)
val hold = Mux(first, sel, RegEnable(sel, first)) // a_first is not for whole xfer
hold & ~limit // if more than one a_first/xfer, the address must be aligned anyway
}
}
val index = sel | count
def helper(idata: UInt, width: Int): UInt = {
val mux = VecInit.tabulate(ratio) { i => idata((i+1)*outBytes*width-1, i*outBytes*width) }
mux(index)
}
out.bits := in.bits
out.valid := in.valid
in.ready := out.ready
// Don't put down hardware if we never carry data
edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits), 8))
(out.bits, in.bits) match {
case (o: TLBundleA, i: TLBundleA) => o.mask := helper(i.mask, 1)
case (o: TLBundleB, i: TLBundleB) => o.mask := helper(i.mask, 1)
case (o: TLBundleC, i: TLBundleC) => () // replicating corrupt to all beats is ok
case (o: TLBundleD, i: TLBundleD) => ()
case _ => require(false, "Impossbile bundle combination in WidthWidget")
}
// Repeat the input if we're not last
!last
}
def splice[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = {
if (edgeIn.manager.beatBytes == edgeOut.manager.beatBytes) {
// nothing to do; pass it through
out.bits := in.bits
out.valid := in.valid
in.ready := out.ready
} else if (edgeIn.manager.beatBytes > edgeOut.manager.beatBytes) {
// split input to output
val repeat = Wire(Bool())
val repeated = Repeater(in, repeat)
val cated = Wire(chiselTypeOf(repeated))
cated <> repeated
edgeIn.data(cated.bits) := Cat(
edgeIn.data(repeated.bits)(edgeIn.manager.beatBytes*8-1, edgeOut.manager.beatBytes*8),
edgeIn.data(in.bits)(edgeOut.manager.beatBytes*8-1, 0))
repeat := split(edgeIn, cated, edgeOut, out, sourceMap)
} else {
// merge input to output
merge(edgeIn, in, edgeOut, out)
}
}
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
// If the master is narrower than the slave, the D channel must be narrowed.
// This is tricky, because the D channel has no address data.
// Thus, you don't know which part of a sub-beat transfer to extract.
// To fix this, we record the relevant address bits for all sources.
// The assumption is that this sort of situation happens only where
// you connect a narrow master to the system bus, so there are few sources.
def sourceMap(source_bits: UInt) = {
val source = if (edgeIn.client.endSourceId == 1) 0.U(0.W) else source_bits
require (edgeOut.manager.beatBytes > edgeIn.manager.beatBytes)
val keepBits = log2Ceil(edgeOut.manager.beatBytes)
val dropBits = log2Ceil(edgeIn.manager.beatBytes)
val sources = Reg(Vec(edgeIn.client.endSourceId, UInt((keepBits-dropBits).W)))
val a_sel = in.a.bits.address(keepBits-1, dropBits)
when (in.a.fire) {
if (edgeIn.client.endSourceId == 1) { // avoid extraction-index-width warning
sources(0) := a_sel
} else {
sources(in.a.bits.source) := a_sel
}
}
// depopulate unused source registers:
edgeIn.client.unusedSources.foreach { id => sources(id) := 0.U }
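      // If the inner edge guarantees at least one cycle between an A request and its
      // D response (minLatency > 0), the registered address bits are always up to
      // date when D arrives. With zero minimum latency the response may come in the
      // same cycle as the request, so bypass a_sel combinationally for a matching source.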
val bypass = in.a.valid && in.a.bits.source === source
if (edgeIn.manager.minLatency > 0) sources(source)
else Mux(bypass, a_sel, sources(source))
}
splice(edgeIn, in.a, edgeOut, out.a, sourceMap)
splice(edgeOut, out.d, edgeIn, in.d, sourceMap)
if (edgeOut.manager.anySupportAcquireB && edgeIn.client.anySupportProbe) {
splice(edgeOut, out.b, edgeIn, in.b, sourceMap)
splice(edgeIn, in.c, edgeOut, out.c, sourceMap)
out.e.valid := in.e.valid
out.e.bits := in.e.bits
in.e.ready := out.e.ready
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLWidthWidget
{
def apply(innerBeatBytes: Int)(implicit p: Parameters): TLNode =
{
val widget = LazyModule(new TLWidthWidget(innerBeatBytes))
widget.node
}
def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper.beatBytes)
}
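// Hypothetical usage sketch (not part of the upstream file): adapting a narrow
// master onto a wide RAM. The widget's argument is the beatBytes presented on
// its inner (master-facing) edge; the outer width comes from the slave it is
// connected to. The class name is a placeholder, and TLRAM's beatBytes
// parameter is assumed here for illustration.
class TLWidthWidgetExampleLM(txns: Int)(implicit p: Parameters) extends LazyModule {
  val fuzz = LazyModule(new TLFuzzer(txns))
  val ram  = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff), beatBytes = 16))
  (ram.node
    := TLWidthWidget(4)  // inner side is 4 bytes wide; widget widens to the RAM's 16
    := fuzz.node)
  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) { }
}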
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMWidthWidget(first: Int, second: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("WidthWidget"))
val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff)))
(ram.node
:= TLDelayer(0.1)
:= TLFragmenter(4, 256)
:= TLWidthWidget(second)
:= TLWidthWidget(first)
:= TLDelayer(0.1)
:= model.node
:= fuzz.node)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMWidthWidgetTest(little: Int, big: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMWidthWidget(little,big,txns)).module)
dut.io.start := DontCare
io.finished := dut.io.finished
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
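// Illustrative sketch (not from the original file): a simple slave handling
// only A-channel requests can derive its D-channel opcode from the lookup
// table above, e.g.
//
//   d.bits.opcode := TLMessages.adResponse(a.bits.opcode)
//
// where `a` and `d` are the device's A and D channels.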
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
 *   (B)ranch: the agent is on an outwards path to the Trunk and may hold a read-only copy.
 *   (N)one: the agent holds no copy of (and no permissions on) the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
  // Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
  def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Frontend.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.bundlebridge._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.tile.{CoreBundle, BaseTile}
import freechips.rocketchip.tilelink.{TLWidthWidget, TLEdgeOut}
import freechips.rocketchip.util.{ClockGate, ShiftQueue, property}
import freechips.rocketchip.util.UIntToAugmentedUInt
class FrontendReq(implicit p: Parameters) extends CoreBundle()(p) {
val pc = UInt(vaddrBitsExtended.W)
val speculative = Bool()
}
class FrontendExceptions extends Bundle {
val pf = new Bundle {
val inst = Bool()
}
val gf = new Bundle {
val inst = Bool()
}
val ae = new Bundle {
val inst = Bool()
}
}
class FrontendResp(implicit p: Parameters) extends CoreBundle()(p) {
val btb = new BTBResp
val pc = UInt(vaddrBitsExtended.W) // ID stage PC
val data = UInt((fetchWidth * coreInstBits).W)
val mask = Bits(fetchWidth.W)
val xcpt = new FrontendExceptions
val replay = Bool()
}
class FrontendPerfEvents extends Bundle {
val acquire = Bool()
val tlbMiss = Bool()
}
class FrontendIO(implicit p: Parameters) extends CoreBundle()(p) {
val might_request = Output(Bool())
val clock_enabled = Input(Bool())
val req = Valid(new FrontendReq)
val sfence = Valid(new SFenceReq)
val resp = Flipped(Decoupled(new FrontendResp))
val gpa = Flipped(Valid(UInt(vaddrBitsExtended.W)))
val gpa_is_pte = Input(Bool())
val btb_update = Valid(new BTBUpdate)
val bht_update = Valid(new BHTUpdate)
val ras_update = Valid(new RASUpdate)
val flush_icache = Output(Bool())
val npc = Input(UInt(vaddrBitsExtended.W))
val perf = Input(new FrontendPerfEvents())
val progress = Output(Bool())
}
class Frontend(val icacheParams: ICacheParams, tileId: Int)(implicit p: Parameters) extends LazyModule {
lazy val module = new FrontendModule(this)
val icache = LazyModule(new ICache(icacheParams, tileId))
val masterNode = icache.masterNode
val slaveNode = icache.slaveNode
val resetVectorSinkNode = BundleBridgeSink[UInt](Some(() => UInt(masterNode.edges.out.head.bundle.addressBits.W)))
}
class FrontendBundle(val outer: Frontend) extends CoreBundle()(outer.p) {
val cpu = Flipped(new FrontendIO())
val ptw = new TLBPTWIO()
val errors = new ICacheErrors
}
class FrontendModule(outer: Frontend) extends LazyModuleImp(outer)
with HasRocketCoreParameters
with HasL1ICacheParameters {
val io = IO(new FrontendBundle(outer))
val io_reset_vector = outer.resetVectorSinkNode.bundle
implicit val edge: TLEdgeOut = outer.masterNode.edges.out(0)
val icache = outer.icache.module
require(fetchWidth*coreInstBytes == outer.icacheParams.fetchBytes)
val fq = withReset(reset.asBool || io.cpu.req.valid) { Module(new ShiftQueue(new FrontendResp, 5, flow = true)) }
val clock_en_reg = Reg(Bool())
val clock_en = clock_en_reg || io.cpu.might_request
io.cpu.clock_enabled := clock_en
assert(!(io.cpu.req.valid || io.cpu.sfence.valid || io.cpu.flush_icache || io.cpu.bht_update.valid || io.cpu.btb_update.valid) || io.cpu.might_request)
val gated_clock =
if (!rocketParams.clockGate) clock
else ClockGate(clock, clock_en, "icache_clock_gate")
icache.clock := gated_clock
icache.io.clock_enabled := clock_en
withClock (gated_clock) { // entering gated-clock domain
val tlb = Module(new TLB(true, log2Ceil(fetchBytes), TLBConfig(nTLBSets, nTLBWays, outer.icacheParams.nTLBBasePageSectors, outer.icacheParams.nTLBSuperpages)))
val s1_valid = Reg(Bool())
val s2_valid = RegInit(false.B)
val s0_fq_has_space =
!fq.io.mask(fq.io.mask.getWidth-3) ||
(!fq.io.mask(fq.io.mask.getWidth-2) && (!s1_valid || !s2_valid)) ||
(!fq.io.mask(fq.io.mask.getWidth-1) && (!s1_valid && !s2_valid))
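    // In other words: a new fetch may issue only when the 5-entry fetch queue can
    // absorb it plus anything already in flight: at most 2 entries occupied, or at
    // most 3 occupied with only one of s1/s2 valid, or at most 4 occupied with
    // both s1 and s2 empty.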
val s0_valid = io.cpu.req.valid || s0_fq_has_space
s1_valid := s0_valid
val s1_pc = Reg(UInt(vaddrBitsExtended.W))
val s1_speculative = Reg(Bool())
val s2_pc = RegInit(t = UInt(vaddrBitsExtended.W), alignPC(io_reset_vector))
val s2_btb_resp_valid = if (usingBTB) Reg(Bool()) else false.B
val s2_btb_resp_bits = Reg(new BTBResp)
val s2_btb_taken = s2_btb_resp_valid && s2_btb_resp_bits.taken
val s2_tlb_resp = Reg(tlb.io.resp.cloneType)
val s2_xcpt = s2_tlb_resp.ae.inst || s2_tlb_resp.pf.inst || s2_tlb_resp.gf.inst
val s2_speculative = RegInit(false.B)
val s2_partial_insn_valid = RegInit(false.B)
val s2_partial_insn = Reg(UInt(coreInstBits.W))
val wrong_path = RegInit(false.B)
val s1_base_pc = ~(~s1_pc | (fetchBytes - 1).U)
val ntpc = s1_base_pc + fetchBytes.U
val predicted_npc = WireDefault(ntpc)
val predicted_taken = WireDefault(false.B)
val s2_replay = Wire(Bool())
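    // Replay the s2 fetch if the fetch queue could not accept it this cycle, and
    // keep replaying until a new s0 fetch actually issues.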
s2_replay := (s2_valid && !fq.io.enq.fire) || RegNext(s2_replay && !s0_valid, true.B)
val npc = Mux(s2_replay, s2_pc, predicted_npc)
s1_pc := io.cpu.npc
// consider RVC fetches across blocks to be non-speculative if the first
// part was non-speculative
val s0_speculative =
if (usingCompressed) s1_speculative || s2_valid && !s2_speculative || predicted_taken
else true.B
s1_speculative := Mux(io.cpu.req.valid, io.cpu.req.bits.speculative, Mux(s2_replay, s2_speculative, s0_speculative))
val s2_redirect = WireDefault(io.cpu.req.valid)
s2_valid := false.B
when (!s2_replay) {
s2_valid := !s2_redirect
s2_pc := s1_pc
s2_speculative := s1_speculative
s2_tlb_resp := tlb.io.resp
}
val recent_progress_counter_init = 3.U
val recent_progress_counter = RegInit(recent_progress_counter_init)
val recent_progress = recent_progress_counter > 0.U
when(io.ptw.req.fire && recent_progress) { recent_progress_counter := recent_progress_counter - 1.U }
when(io.cpu.progress) { recent_progress_counter := recent_progress_counter_init }
val s2_kill_speculative_tlb_refill = s2_speculative && !recent_progress
io.ptw <> tlb.io.ptw
tlb.io.req.valid := s1_valid && !s2_replay
tlb.io.req.bits.cmd := M_XRD // Frontend only reads
tlb.io.req.bits.vaddr := s1_pc
tlb.io.req.bits.passthrough := false.B
tlb.io.req.bits.size := log2Ceil(coreInstBytes*fetchWidth).U
tlb.io.req.bits.prv := io.ptw.status.prv
tlb.io.req.bits.v := io.ptw.status.v
tlb.io.sfence := io.cpu.sfence
tlb.io.kill := !s2_valid || s2_kill_speculative_tlb_refill
icache.io.req.valid := s0_valid
icache.io.req.bits.addr := io.cpu.npc
icache.io.invalidate := io.cpu.flush_icache
icache.io.s1_paddr := tlb.io.resp.paddr
icache.io.s2_vaddr := s2_pc
icache.io.s1_kill := s2_redirect || tlb.io.resp.miss || s2_replay
val s2_can_speculatively_refill = s2_tlb_resp.cacheable && !io.ptw.customCSRs.asInstanceOf[RocketCustomCSRs].disableSpeculativeICacheRefill
icache.io.s2_kill := s2_speculative && !s2_can_speculatively_refill || s2_xcpt
icache.io.s2_cacheable := s2_tlb_resp.cacheable
icache.io.s2_prefetch := s2_tlb_resp.prefetchable && !io.ptw.customCSRs.asInstanceOf[RocketCustomCSRs].disableICachePrefetch
fq.io.enq.valid := RegNext(s1_valid) && s2_valid && (icache.io.resp.valid || (s2_kill_speculative_tlb_refill && s2_tlb_resp.miss) || (!s2_tlb_resp.miss && icache.io.s2_kill))
fq.io.enq.bits.pc := s2_pc
io.cpu.npc := alignPC(Mux(io.cpu.req.valid, io.cpu.req.bits.pc, npc))
fq.io.enq.bits.data := icache.io.resp.bits.data
fq.io.enq.bits.mask := ((1 << fetchWidth)-1).U << s2_pc.extract(log2Ceil(fetchWidth)+log2Ceil(coreInstBytes)-1, log2Ceil(coreInstBytes))
fq.io.enq.bits.replay := (icache.io.resp.bits.replay || icache.io.s2_kill && !icache.io.resp.valid && !s2_xcpt) || (s2_kill_speculative_tlb_refill && s2_tlb_resp.miss)
fq.io.enq.bits.btb := s2_btb_resp_bits
fq.io.enq.bits.btb.taken := s2_btb_taken
fq.io.enq.bits.xcpt := s2_tlb_resp
assert(!(s2_speculative && io.ptw.customCSRs.asInstanceOf[RocketCustomCSRs].disableSpeculativeICacheRefill && !icache.io.s2_kill))
when (icache.io.resp.valid && icache.io.resp.bits.ae) { fq.io.enq.bits.xcpt.ae.inst := true.B }
if (usingBTB) {
val btb = Module(new BTB)
btb.io.flush := false.B
btb.io.req.valid := false.B
btb.io.req.bits.addr := s1_pc
btb.io.btb_update := io.cpu.btb_update
btb.io.bht_update := io.cpu.bht_update
btb.io.ras_update.valid := false.B
btb.io.ras_update.bits := DontCare
btb.io.bht_advance.valid := false.B
btb.io.bht_advance.bits := DontCare
when (!s2_replay) {
btb.io.req.valid := !s2_redirect
s2_btb_resp_valid := btb.io.resp.valid
s2_btb_resp_bits := btb.io.resp.bits
}
when (btb.io.resp.valid && btb.io.resp.bits.taken) {
predicted_npc := btb.io.resp.bits.target.sextTo(vaddrBitsExtended)
predicted_taken := true.B
}
val force_taken = io.ptw.customCSRs.bpmStatic
when (io.ptw.customCSRs.flushBTB) { btb.io.flush := true.B }
when (force_taken) { btb.io.bht_update.valid := false.B }
val s2_base_pc = ~(~s2_pc | (fetchBytes-1).U)
val taken_idx = Wire(UInt())
val after_idx = Wire(UInt())
val useRAS = WireDefault(false.B)
val updateBTB = WireDefault(false.B)
// If !prevTaken, ras_update / bht_update is always invalid.
taken_idx := DontCare
after_idx := DontCare
def scanInsns(idx: Int, prevValid: Bool, prevBits: UInt, prevTaken: Bool): Bool = {
def insnIsRVC(bits: UInt) = bits(1,0) =/= 3.U
val prevRVI = prevValid && !insnIsRVC(prevBits)
val valid = fq.io.enq.bits.mask(idx) && !prevRVI
val bits = fq.io.enq.bits.data(coreInstBits*(idx+1)-1, coreInstBits*idx)
val rvc = insnIsRVC(bits)
val rviBits = Cat(bits, prevBits)
val rviBranch = rviBits(6,0) === Instructions.BEQ.value.U.extract(6,0)
val rviJump = rviBits(6,0) === Instructions.JAL.value.U.extract(6,0)
val rviJALR = rviBits(6,0) === Instructions.JALR.value.U.extract(6,0)
val rviReturn = rviJALR && !rviBits(7) && BitPat("b00?01") === rviBits(19,15)
val rviCall = (rviJALR || rviJump) && rviBits(7)
val rvcBranch = bits === Instructions.C_BEQZ || bits === Instructions.C_BNEZ
val rvcJAL = (xLen == 32).B && bits === Instructions32.C_JAL
val rvcJump = bits === Instructions.C_J || rvcJAL
val rvcImm = Mux(bits(14), new RVCDecoder(bits, xLen, fLen).bImm.asSInt, new RVCDecoder(bits, xLen, fLen).jImm.asSInt)
val rvcJR = bits === Instructions.C_MV && bits(6,2) === 0.U
val rvcReturn = rvcJR && BitPat("b00?01") === bits(11,7)
val rvcJALR = bits === Instructions.C_ADD && bits(6,2) === 0.U
val rvcCall = rvcJAL || rvcJALR
val rviImm = Mux(rviBits(3), ImmGen(IMM_UJ, rviBits), ImmGen(IMM_SB, rviBits))
val predict_taken = s2_btb_resp_bits.bht.taken || force_taken
val taken =
prevRVI && (rviJump || rviJALR || rviBranch && predict_taken) ||
valid && (rvcJump || rvcJALR || rvcJR || rvcBranch && predict_taken)
val predictReturn = btb.io.ras_head.valid && (prevRVI && rviReturn || valid && rvcReturn)
val predictJump = prevRVI && rviJump || valid && rvcJump
val predictBranch = predict_taken && (prevRVI && rviBranch || valid && rvcBranch)
when (s2_valid && s2_btb_resp_valid && s2_btb_resp_bits.bridx === idx.U && valid && !rvc) {
// The BTB has predicted that the middle of an RVI instruction is
// a branch! Flush the BTB and the pipeline.
btb.io.flush := true.B
fq.io.enq.bits.replay := true.B
wrong_path := true.B
ccover(wrong_path, "BTB_NON_CFI_ON_WRONG_PATH", "BTB predicted a non-branch was taken while on the wrong path")
}
when (!prevTaken) {
taken_idx := idx.U
after_idx := (idx + 1).U
btb.io.ras_update.valid := fq.io.enq.fire && !wrong_path && (prevRVI && (rviCall || rviReturn) || valid && (rvcCall || rvcReturn))
btb.io.ras_update.bits.cfiType := Mux(Mux(prevRVI, rviReturn, rvcReturn), CFIType.ret,
Mux(Mux(prevRVI, rviCall, rvcCall), CFIType.call,
Mux(Mux(prevRVI, rviBranch, rvcBranch) && !force_taken, CFIType.branch,
CFIType.jump)))
when (!s2_btb_taken) {
when (fq.io.enq.fire && taken && !predictBranch && !predictJump && !predictReturn) {
wrong_path := true.B
}
when (s2_valid && predictReturn) {
useRAS := true.B
}
when (s2_valid && (predictBranch || predictJump)) {
val pc = s2_base_pc | (idx*coreInstBytes).U
val npc =
if (idx == 0) pc.asSInt + Mux(prevRVI, rviImm -& 2.S, rvcImm)
else Mux(prevRVI, pc - coreInstBytes.U, pc).asSInt + Mux(prevRVI, rviImm, rvcImm)
predicted_npc := npc.asUInt
}
}
when (prevRVI && rviBranch || valid && rvcBranch) {
btb.io.bht_advance.valid := fq.io.enq.fire && !wrong_path
btb.io.bht_advance.bits := s2_btb_resp_bits
}
when (!s2_btb_resp_valid && (predictBranch && s2_btb_resp_bits.bht.strongly_taken || predictJump || predictReturn)) {
updateBTB := true.B
}
}
if (idx == fetchWidth-1) {
when (fq.io.enq.fire) {
s2_partial_insn_valid := false.B
when (valid && !prevTaken && !rvc) {
s2_partial_insn_valid := true.B
s2_partial_insn := bits | 0x3.U
}
}
prevTaken || taken
} else {
scanInsns(idx + 1, valid, bits, prevTaken || taken)
}
}
when (!io.cpu.btb_update.valid) {
val fetch_bubble_likely = !fq.io.mask(1)
btb.io.btb_update.valid := fq.io.enq.fire && !wrong_path && fetch_bubble_likely && updateBTB
btb.io.btb_update.bits.prediction.entry := tileParams.btb.get.nEntries.U
btb.io.btb_update.bits.isValid := true.B
btb.io.btb_update.bits.cfiType := btb.io.ras_update.bits.cfiType
btb.io.btb_update.bits.br_pc := s2_base_pc | (taken_idx << log2Ceil(coreInstBytes))
btb.io.btb_update.bits.pc := s2_base_pc
}
btb.io.ras_update.bits.returnAddr := s2_base_pc + (after_idx << log2Ceil(coreInstBytes))
val taken = scanInsns(0, s2_partial_insn_valid, s2_partial_insn, false.B)
when (useRAS) {
predicted_npc := btb.io.ras_head.bits
}
when (fq.io.enq.fire && (s2_btb_taken || taken)) {
s2_partial_insn_valid := false.B
}
when (!s2_btb_taken) {
when (taken) {
fq.io.enq.bits.btb.bridx := taken_idx
fq.io.enq.bits.btb.taken := true.B
fq.io.enq.bits.btb.entry := tileParams.btb.get.nEntries.U
when (fq.io.enq.fire) { s2_redirect := true.B }
}
}
assert(!s2_partial_insn_valid || fq.io.enq.bits.mask(0))
when (s2_redirect) { s2_partial_insn_valid := false.B }
when (io.cpu.req.valid) { wrong_path := false.B }
}
io.cpu.resp <> fq.io.deq
// supply guest physical address to commit stage
val gpa_valid = Reg(Bool())
val gpa = Reg(UInt(vaddrBitsExtended.W))
val gpa_is_pte = Reg(Bool())
when (fq.io.enq.fire && s2_tlb_resp.gf.inst) {
when (!gpa_valid) {
gpa := s2_tlb_resp.gpa
gpa_is_pte := s2_tlb_resp.gpa_is_pte
}
gpa_valid := true.B
}
when (io.cpu.req.valid) {
gpa_valid := false.B
}
io.cpu.gpa.valid := gpa_valid
io.cpu.gpa.bits := gpa
io.cpu.gpa_is_pte := gpa_is_pte
// performance events
io.cpu.perf.acquire := icache.io.perf.acquire
io.cpu.perf.tlbMiss := io.ptw.req.fire
io.errors := icache.io.errors
// gate the clock
clock_en_reg := !rocketParams.clockGate.B ||
io.cpu.might_request || // chicken bit
icache.io.keep_clock_enabled || // I$ miss or ITIM access
s1_valid || s2_valid || // some fetch in flight
!tlb.io.req.ready || // handling TLB miss
!fq.io.mask(fq.io.mask.getWidth-1) // queue not full
} // leaving gated-clock domain
def alignPC(pc: UInt) = ~(~pc | (coreInstBytes - 1).U)
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
property.cover(cond, s"FRONTEND_$label", "Rocket;;" + desc)
}
/** Mix-ins for constructing tiles that have an ICache-based pipeline frontend */
trait HasICacheFrontend extends CanHavePTW { this: BaseTile =>
val module: HasICacheFrontendModule
val frontend = LazyModule(new Frontend(tileParams.icache.get, tileId))
tlMasterXbar.node := TLWidthWidget(tileParams.icache.get.rowBits/8) := frontend.masterNode
connectTLSlave(frontend.slaveNode, tileParams.core.fetchBytes)
frontend.icache.hartIdSinkNodeOpt.foreach { _ := hartIdNexusNode }
frontend.icache.mmioAddressPrefixSinkNodeOpt.foreach { _ := mmioAddressPrefixNexusNode }
frontend.resetVectorSinkNode := resetVectorNexusNode
nPTWPorts += 1
  // This is None when there is no ITIM address, in which case the device
  // instantiated in the frontend is not actually used.
private val deviceOpt = if (tileParams.icache.get.itimAddr.isDefined) Some(frontend.icache.device) else None
}
trait HasICacheFrontendModule extends CanHavePTWModule {
val outer: HasICacheFrontend
ptwPorts += outer.frontend.module.io.ptw
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and the unconnected [[Dangle]]s from this module and its
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A potentially empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
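  // The UInt overload below avoids two full-width magnitude comparators by
  // matching the common prefix of start and end-1 exactly and range-checking
  // only the low bits. Worked example (illustrative): IdRange(4, 6) has
  // start ^ (end-1) = 0b001, so it checks (x >> 1) === 2.U together with a bound
  // on bit 0, accepting exactly x = 4 or 5.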
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of power-of-2 sizes [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
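  // Worked example (illustrative): AddressSet(0x0, 0xff).subtract(AddressSet(0x0, 0xf))
  // peels off one set per remaining mask bit, yielding
  //   AddressSet(0x10, 0xf), AddressSet(0x20, 0x1f), AddressSet(0x40, 0x3f), AddressSet(0x80, 0x7f)
  // i.e. 0x10-0x1f, 0x20-0x3f, 0x40-0x7f and 0x80-0xff.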
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
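// Illustrative sketch (not from the original file): a BufferParams value is
// applied to a DecoupledIO to optionally insert a Queue, e.g.
//
//   val buffered = BufferParams.default(someDecoupledIO) // 2-entry queue
//   val passthru = BufferParams.none(someDecoupledIO)    // returned unchanged
//
// where `someDecoupledIO` stands for any DecoupledIO available in a Module.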
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File PTW.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util.{Arbiter, Cat, Decoupled, Enum, Mux1H, OHToUInt, PopCount, PriorityEncoder, PriorityEncoderOH, RegEnable, UIntToOH, Valid, is, isPow2, log2Ceil, switch}
import chisel3.withClock
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.subsystem.CacheBlockBytes
import freechips.rocketchip.tile._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property
import scala.collection.mutable.ListBuffer
/** PTE request from TLB to PTW
*
 * The TLB sends a PTE request to the PTW on an L1 TLB miss.
*/
class PTWReq(implicit p: Parameters) extends CoreBundle()(p) {
val addr = UInt(vpnBits.W)
val need_gpa = Bool()
val vstage1 = Bool()
val stage2 = Bool()
}
/** PTE info from L2TLB to TLB
*
 * containing: target PTE, exceptions, two-stage translation info
*/
class PTWResp(implicit p: Parameters) extends CoreBundle()(p) {
/** ptw access exception */
val ae_ptw = Bool()
/** final access exception */
val ae_final = Bool()
/** page fault */
val pf = Bool()
/** guest page fault */
val gf = Bool()
/** hypervisor read */
val hr = Bool()
/** hypervisor write */
val hw = Bool()
/** hypervisor execute */
val hx = Bool()
/** PTE to refill L1TLB
*
* source: L2TLB
*/
val pte = new PTE
/** pte pglevel */
val level = UInt(log2Ceil(pgLevels).W)
/** fragmented_superpage support */
val fragmented_superpage = Bool()
/** homogeneous for both pma and pmp */
val homogeneous = Bool()
val gpa = Valid(UInt(vaddrBits.W))
val gpa_is_pte = Bool()
}
/** IO between TLB and PTW
*
* PTW receives :
* - PTE request
* - CSRs info
* - pmp results from PMP(in TLB)
*/
class TLBPTWIO(implicit p: Parameters) extends CoreBundle()(p)
with HasCoreParameters {
val req = Decoupled(Valid(new PTWReq))
val resp = Flipped(Valid(new PTWResp))
val ptbr = Input(new PTBR())
val hgatp = Input(new PTBR())
val vsatp = Input(new PTBR())
val status = Input(new MStatus())
val hstatus = Input(new HStatus())
val gstatus = Input(new MStatus())
val pmp = Input(Vec(nPMPs, new PMP))
val customCSRs = Flipped(coreParams.customCSRs)
}
/** PTW performance statistics */
class PTWPerfEvents extends Bundle {
val l2miss = Bool()
val l2hit = Bool()
val pte_miss = Bool()
val pte_hit = Bool()
}
/** Datapath IO between PTW and Core
*
* PTW receives CSRs info, pmp checks, sfence instruction info
*
* PTW sends its performance statistics to core
*/
class DatapathPTWIO(implicit p: Parameters) extends CoreBundle()(p)
with HasCoreParameters {
val ptbr = Input(new PTBR())
val hgatp = Input(new PTBR())
val vsatp = Input(new PTBR())
val sfence = Flipped(Valid(new SFenceReq))
val status = Input(new MStatus())
val hstatus = Input(new HStatus())
val gstatus = Input(new MStatus())
val pmp = Input(Vec(nPMPs, new PMP))
val perf = Output(new PTWPerfEvents())
val customCSRs = Flipped(coreParams.customCSRs)
/** enable clock generated by ptw */
val clock_enabled = Output(Bool())
}
/** PTE template for transmission
*
* contains useful methods to check PTE attributes
 * @see RV-priv spec 4.3.1 for page table entry format
*/
class PTE(implicit p: Parameters) extends CoreBundle()(p) {
val reserved_for_future = UInt(10.W)
val ppn = UInt(44.W)
val reserved_for_software = Bits(2.W)
/** dirty bit */
val d = Bool()
/** access bit */
val a = Bool()
/** global mapping */
val g = Bool()
/** user mode accessible */
val u = Bool()
/** whether the page is executable */
val x = Bool()
/** whether the page is writable */
val w = Bool()
/** whether the page is readable */
val r = Bool()
/** valid bit */
val v = Bool()
  /** return true if this PTE is a pointer to the next level of the page table */
def table(dummy: Int = 0) = v && !r && !w && !x && !d && !a && !u && reserved_for_future === 0.U
  /** return true if this PTE is a leaf */
def leaf(dummy: Int = 0) = v && (r || (x && !w)) && a
/** user read */
def ur(dummy: Int = 0) = sr() && u
/** user write*/
def uw(dummy: Int = 0) = sw() && u
/** user execute */
def ux(dummy: Int = 0) = sx() && u
/** supervisor read */
def sr(dummy: Int = 0) = leaf() && r
/** supervisor write */
def sw(dummy: Int = 0) = leaf() && w && d
/** supervisor execute */
def sx(dummy: Int = 0) = leaf() && x
/** full permission: writable and executable in user mode */
def isFullPerm(dummy: Int = 0) = uw() && ux()
}
/** L2TLB PTE template
*
* contains tag bits
* @param nSets number of sets in L2TLB
* @see RV-priv spec 4.3.1 for page table entry format
*/
class L2TLBEntry(nSets: Int)(implicit p: Parameters) extends CoreBundle()(p)
with HasCoreParameters {
val idxBits = log2Ceil(nSets)
val tagBits = maxSVAddrBits - pgIdxBits - idxBits + (if (usingHypervisor) 1 else 0)
val tag = UInt(tagBits.W)
val ppn = UInt(ppnBits.W)
/** dirty bit */
val d = Bool()
/** access bit */
val a = Bool()
/** user mode accessible */
val u = Bool()
/** whether the page is executable */
val x = Bool()
/** whether the page is writable */
val w = Bool()
/** whether the page is readable */
val r = Bool()
}
/** PTW contains the L2TLB, performs page table walks for the higher-level TLBs, and caches queries from the L1 TLBs (I$, D$, RoCC).
 *
 * It performs a hierarchical page table query to mem for the desired leaf PTE and caches it in the l2tlb.
 * Besides leaf PTEs, it also caches non-leaf PTEs in pte_cache to accelerate the process.
*
* ==Structure==
* - l2tlb : for leaf PTEs
* - set-associative (configurable with [[CoreParams.nL2TLBEntries]]and [[CoreParams.nL2TLBWays]]))
* - PLRU
* - pte_cache: for non-leaf PTEs
* - set-associative
* - LRU
* - s2_pte_cache: for non-leaf PTEs in 2-stage translation
* - set-associative
* - PLRU
*
 * l2tlb pipeline: 3 stages
* {{{
* stage 0 : read
* stage 1 : decode
* stage 2 : hit check
* }}}
* ==State Machine==
 * s_ready: ready to receive requests from the TLB
 * s_req: request mem; check for a pte_cache hit
 * s_wait1: deal with l2tlb error
 * s_wait2: final hit check
* s_wait3: receive mem response
* s_fragment_superpage: for superpage PTE
*
* @note l2tlb hit happens in s_req or s_wait1
* @see RV-priv spec 4.3-4.6 for Virtual-Memory System
* @see RV-priv spec 8.5 for Two-Stage Address Translation
* @todo details in two-stage translation
*/
class PTW(n: Int)(implicit edge: TLEdgeOut, p: Parameters) extends CoreModule()(p) {
val io = IO(new Bundle {
/** to n TLB */
val requestor = Flipped(Vec(n, new TLBPTWIO))
/** to HellaCache */
val mem = new HellaCacheIO
/** to Core
*
* contains CSRs info and performance statistics
*/
val dpath = new DatapathPTWIO
})
val s_ready :: s_req :: s_wait1 :: s_dummy1 :: s_wait2 :: s_wait3 :: s_dummy2 :: s_fragment_superpage :: Nil = Enum(8)
val state = RegInit(s_ready)
val l2_refill_wire = Wire(Bool())
  /** Arbiter to arbitrate requests from the n TLBs */
val arb = Module(new Arbiter(Valid(new PTWReq), n))
  // use the TLB requests as the arbiter's inputs
arb.io.in <> io.requestor.map(_.req)
// receive req only when s_ready and not in refill
arb.io.out.ready := (state === s_ready) && !l2_refill_wire
val resp_valid = RegNext(VecInit(Seq.fill(io.requestor.size)(false.B)))
val clock_en = state =/= s_ready || l2_refill_wire || arb.io.out.valid || io.dpath.sfence.valid || io.dpath.customCSRs.disableDCacheClockGate
io.dpath.clock_enabled := usingVM.B && clock_en
val gated_clock =
if (!usingVM || !tileParams.dcache.get.clockGate) clock
else ClockGate(clock, clock_en, "ptw_clock_gate")
withClock (gated_clock) { // entering gated-clock domain
val invalidated = Reg(Bool())
/** current PTE level
* {{{
* 0 <= count <= pgLevel-1
* count = pgLevel - 1 : leaf PTE
* count < pgLevel - 1 : non-leaf PTE
* }}}
*/
val count = Reg(UInt(log2Ceil(pgLevels).W))
val resp_ae_ptw = Reg(Bool())
val resp_ae_final = Reg(Bool())
val resp_pf = Reg(Bool())
val resp_gf = Reg(Bool())
val resp_hr = Reg(Bool())
val resp_hw = Reg(Bool())
val resp_hx = Reg(Bool())
val resp_fragmented_superpage = Reg(Bool())
/** tlb request */
val r_req = Reg(new PTWReq)
    /** index of the currently selected requestor in the arbiter */
val r_req_dest = Reg(Bits())
// to respond to L1TLB : l2_hit
// to construct mem.req.addr
val r_pte = Reg(new PTE)
val r_hgatp = Reg(new PTBR)
// 2-stage pageLevel
val aux_count = Reg(UInt(log2Ceil(pgLevels).W))
/** pte for 2-stage translation */
val aux_pte = Reg(new PTE)
val gpa_pgoff = Reg(UInt(pgIdxBits.W)) // only valid in resp_gf case
val stage2 = Reg(Bool())
val stage2_final = Reg(Bool())
val satp = Mux(arb.io.out.bits.bits.vstage1, io.dpath.vsatp, io.dpath.ptbr)
val r_hgatp_initial_count = pgLevels.U - minPgLevels.U - r_hgatp.additionalPgLevels
/** 2-stage translation both enable */
val do_both_stages = r_req.vstage1 && r_req.stage2
val max_count = count max aux_count
val vpn = Mux(r_req.vstage1 && stage2, aux_pte.ppn, r_req.addr)
val mem_resp_valid = RegNext(io.mem.resp.valid)
val mem_resp_data = RegNext(io.mem.resp.bits.data)
io.mem.uncached_resp.map { resp =>
assert(!(resp.valid && io.mem.resp.valid))
resp.ready := true.B
when (resp.valid) {
mem_resp_valid := true.B
mem_resp_data := resp.bits.data
}
}
// construct pte from mem.resp
val (pte, invalid_paddr, invalid_gpa) = {
val tmp = mem_resp_data.asTypeOf(new PTE())
val res = WireDefault(tmp)
res.ppn := Mux(do_both_stages && !stage2, tmp.ppn(vpnBits.min(tmp.ppn.getWidth)-1, 0), tmp.ppn(ppnBits-1, 0))
when (tmp.r || tmp.w || tmp.x) {
// for superpage mappings, make sure PPN LSBs are zero
for (i <- 0 until pgLevels-1)
when (count <= i.U && tmp.ppn((pgLevels-1-i)*pgLevelBits-1, (pgLevels-2-i)*pgLevelBits) =/= 0.U) { res.v := false.B }
}
(res,
Mux(do_both_stages && !stage2, (tmp.ppn >> vpnBits) =/= 0.U, (tmp.ppn >> ppnBits) =/= 0.U),
do_both_stages && !stage2 && checkInvalidHypervisorGPA(r_hgatp, tmp.ppn))
}
  // a non-leaf PTE was found, need to traverse further
val traverse = pte.table() && !invalid_paddr && !invalid_gpa && count < (pgLevels-1).U
  /** address sent to mem for the page-table query */
val pte_addr = if (!usingVM) 0.U else {
val vpn_idxs = (0 until pgLevels).map { i =>
val width = pgLevelBits + (if (i <= pgLevels - minPgLevels) hypervisorExtraAddrBits else 0)
(vpn >> (pgLevels - i - 1) * pgLevelBits)(width - 1, 0)
}
val mask = Mux(stage2 && count === r_hgatp_initial_count, ((1 << (hypervisorExtraAddrBits + pgLevelBits)) - 1).U, ((1 << pgLevelBits) - 1).U)
val vpn_idx = vpn_idxs(count) & mask
val raw_pte_addr = ((r_pte.ppn << pgLevelBits) | vpn_idx) << log2Ceil(xLen / 8)
val size = if (usingHypervisor) vaddrBits else paddrBits
//use r_pte.ppn as page table base address
//use vpn slice as offset
raw_pte_addr.apply(size.min(raw_pte_addr.getWidth) - 1, 0)
}
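    // A minimal worked example (illustrative, assuming Sv39: pgLevels = 3,
    // pgLevelBits = 9, xLen = 64, hypervisor disabled): at the root level
    // (count = 0), vpn_idx = vpn(26, 18) and
    //   pte_addr = ((r_pte.ppn << 9) | vpn_idx) << 3
    // i.e. the root-table base (r_pte.ppn << 12) plus 8 bytes per PTE entry.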
/** stage2_pte_cache input addr */
val stage2_pte_cache_addr = if (!usingHypervisor) 0.U else {
val vpn_idxs = (0 until pgLevels - 1).map { i =>
(r_req.addr >> (pgLevels - i - 1) * pgLevelBits)(pgLevelBits - 1, 0)
}
val vpn_idx = vpn_idxs(aux_count)
val raw_s2_pte_cache_addr = Cat(aux_pte.ppn, vpn_idx) << log2Ceil(xLen / 8)
raw_s2_pte_cache_addr(vaddrBits.min(raw_s2_pte_cache_addr.getWidth) - 1, 0)
}
def makeFragmentedSuperpagePPN(ppn: UInt): Seq[UInt] = {
(pgLevels-1 until 0 by -1).map(i => Cat(ppn >> (pgLevelBits*i), r_req.addr(((pgLevelBits*i) min vpnBits)-1, 0).padTo(pgLevelBits*i)))
}
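    // A minimal sketch of the sequence built above, assuming Sv39 (pgLevels = 3,
    // pgLevelBits = 9, vpnBits = 27); it is indexed by the level of the superpage leaf:
    //   Seq(Cat(ppn >> 18, r_req.addr(17, 0)),  // level-0 leaf (1 GiB page) re-expressed at 4 KiB granularity
    //       Cat(ppn >>  9, r_req.addr( 8, 0)))  // level-1 leaf (2 MiB page) re-expressed at 4 KiB granularity
    // i.e. the superpage keeps its upper PPN bits and the low bits come from the request's VPN.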
/** PTECache caches non-leaf PTE
* @param s2 true: 2-stage address translation
*/
def makePTECache(s2: Boolean): (Bool, UInt) = if (coreParams.nPTECacheEntries == 0) {
(false.B, 0.U)
} else {
val plru = new PseudoLRU(coreParams.nPTECacheEntries)
val valid = RegInit(0.U(coreParams.nPTECacheEntries.W))
val tags = Reg(Vec(coreParams.nPTECacheEntries, UInt((if (usingHypervisor) 1 + vaddrBits else paddrBits).W)))
      // does not store the full PTE, only the PPN
val data = Reg(Vec(coreParams.nPTECacheEntries, UInt((if (usingHypervisor && s2) vpnBits else ppnBits).W)))
val can_hit =
if (s2) count === r_hgatp_initial_count && aux_count < (pgLevels-1).U && r_req.vstage1 && stage2 && !stage2_final
else count < (pgLevels-1).U && Mux(r_req.vstage1, stage2, !r_req.stage2)
val can_refill =
if (s2) do_both_stages && !stage2 && !stage2_final
else can_hit
val tag =
if (s2) Cat(true.B, stage2_pte_cache_addr.padTo(vaddrBits))
else Cat(r_req.vstage1, pte_addr.padTo(if (usingHypervisor) vaddrBits else paddrBits))
val hits = tags.map(_ === tag).asUInt & valid
val hit = hits.orR && can_hit
// refill with mem response
when (mem_resp_valid && traverse && can_refill && !hits.orR && !invalidated) {
val r = Mux(valid.andR, plru.way, PriorityEncoder(~valid))
valid := valid | UIntToOH(r)
tags(r) := tag
data(r) := pte.ppn
plru.access(r)
}
      // update the replacement policy on a hit
when (hit && state === s_req) { plru.access(OHToUInt(hits)) }
when (io.dpath.sfence.valid && (!io.dpath.sfence.bits.rs1 || usingHypervisor.B && io.dpath.sfence.bits.hg)) { valid := 0.U }
val lcount = if (s2) aux_count else count
for (i <- 0 until pgLevels-1) {
ccover(hit && state === s_req && lcount === i.U, s"PTE_CACHE_HIT_L$i", s"PTE cache hit, level $i")
}
(hit, Mux1H(hits, data))
}
// generate pte_cache
val (pte_cache_hit, pte_cache_data) = makePTECache(false)
// generate pte_cache with 2-stage translation
val (stage2_pte_cache_hit, stage2_pte_cache_data) = makePTECache(true)
// pte_cache hit or 2-stage pte_cache hit
val pte_hit = RegNext(false.B)
io.dpath.perf.pte_miss := false.B
io.dpath.perf.pte_hit := pte_hit && (state === s_req) && !io.dpath.perf.l2hit
assert(!(io.dpath.perf.l2hit && (io.dpath.perf.pte_miss || io.dpath.perf.pte_hit)),
"PTE Cache Hit/Miss Performance Monitor Events are lower priority than L2TLB Hit event")
  // l2_refill happens when a leaf PTE is found
val l2_refill = RegNext(false.B)
l2_refill_wire := l2_refill
io.dpath.perf.l2miss := false.B
io.dpath.perf.l2hit := false.B
// l2tlb
val (l2_hit, l2_error, l2_pte, l2_tlb_ram) = if (coreParams.nL2TLBEntries == 0) (false.B, false.B, WireDefault(0.U.asTypeOf(new PTE)), None) else {
val code = new ParityCode
require(isPow2(coreParams.nL2TLBEntries))
require(isPow2(coreParams.nL2TLBWays))
require(coreParams.nL2TLBEntries >= coreParams.nL2TLBWays)
val nL2TLBSets = coreParams.nL2TLBEntries / coreParams.nL2TLBWays
require(isPow2(nL2TLBSets))
val idxBits = log2Ceil(nL2TLBSets)
val l2_plru = new SetAssocLRU(nL2TLBSets, coreParams.nL2TLBWays, "plru")
val ram = DescribedSRAM(
name = "l2_tlb_ram",
desc = "L2 TLB",
size = nL2TLBSets,
data = Vec(coreParams.nL2TLBWays, UInt(code.width(new L2TLBEntry(nL2TLBSets).getWidth).W))
)
val g = Reg(Vec(coreParams.nL2TLBWays, UInt(nL2TLBSets.W)))
val valid = RegInit(VecInit(Seq.fill(coreParams.nL2TLBWays)(0.U(nL2TLBSets.W))))
// use r_req to construct tag
val (r_tag, r_idx) = Split(Cat(r_req.vstage1, r_req.addr(maxSVAddrBits-pgIdxBits-1, 0)), idxBits)
/** the valid vec for the selected set(including n ways) */
val r_valid_vec = valid.map(_(r_idx)).asUInt
val r_valid_vec_q = Reg(UInt(coreParams.nL2TLBWays.W))
val r_l2_plru_way = Reg(UInt(log2Ceil(coreParams.nL2TLBWays max 1).W))
r_valid_vec_q := r_valid_vec
// replacement way
r_l2_plru_way := (if (coreParams.nL2TLBWays > 1) l2_plru.way(r_idx) else 0.U)
    // refill with r_pte (the leaf PTE)
when (l2_refill && !invalidated) {
val entry = Wire(new L2TLBEntry(nL2TLBSets))
entry.ppn := r_pte.ppn
entry.d := r_pte.d
entry.a := r_pte.a
entry.u := r_pte.u
entry.x := r_pte.x
entry.w := r_pte.w
entry.r := r_pte.r
entry.tag := r_tag
      // if all ways are valid, use the PLRU to select the way to replace;
      // otherwise use PriorityEncoderOH to pick an invalid way
val wmask = if (coreParams.nL2TLBWays > 1) Mux(r_valid_vec_q.andR, UIntToOH(r_l2_plru_way, coreParams.nL2TLBWays), PriorityEncoderOH(~r_valid_vec_q)) else 1.U(1.W)
ram.write(r_idx, VecInit(Seq.fill(coreParams.nL2TLBWays)(code.encode(entry.asUInt))), wmask.asBools)
val mask = UIntToOH(r_idx)
for (way <- 0 until coreParams.nL2TLBWays) {
when (wmask(way)) {
valid(way) := valid(way) | mask
g(way) := Mux(r_pte.g, g(way) | mask, g(way) & ~mask)
}
}
}
// sfence happens
when (io.dpath.sfence.valid) {
val hg = usingHypervisor.B && io.dpath.sfence.bits.hg
for (way <- 0 until coreParams.nL2TLBWays) {
valid(way) :=
Mux(!hg && io.dpath.sfence.bits.rs1, valid(way) & ~UIntToOH(io.dpath.sfence.bits.addr(idxBits+pgIdxBits-1, pgIdxBits)),
Mux(!hg && io.dpath.sfence.bits.rs2, valid(way) & g(way),
0.U))
}
}
val s0_valid = !l2_refill && arb.io.out.fire
val s0_suitable = arb.io.out.bits.bits.vstage1 === arb.io.out.bits.bits.stage2 && !arb.io.out.bits.bits.need_gpa
val s1_valid = RegNext(s0_valid && s0_suitable && arb.io.out.bits.valid)
val s2_valid = RegNext(s1_valid)
    // read the set selected by the request's index bits
val s1_rdata = ram.read(arb.io.out.bits.bits.addr(idxBits-1, 0), s0_valid)
val s2_rdata = s1_rdata.map(s1_rdway => code.decode(RegEnable(s1_rdway, s1_valid)))
val s2_valid_vec = RegEnable(r_valid_vec, s1_valid)
val s2_g_vec = RegEnable(VecInit(g.map(_(r_idx))), s1_valid)
val s2_error = (0 until coreParams.nL2TLBWays).map(way => s2_valid_vec(way) && s2_rdata(way).error).orR
when (s2_valid && s2_error) { valid.foreach { _ := 0.U }}
// decode
val s2_entry_vec = s2_rdata.map(_.uncorrected.asTypeOf(new L2TLBEntry(nL2TLBSets)))
val s2_hit_vec = (0 until coreParams.nL2TLBWays).map(way => s2_valid_vec(way) && (r_tag === s2_entry_vec(way).tag))
val s2_hit = s2_valid && s2_hit_vec.orR
io.dpath.perf.l2miss := s2_valid && !(s2_hit_vec.orR)
io.dpath.perf.l2hit := s2_hit
when (s2_hit) {
l2_plru.access(r_idx, OHToUInt(s2_hit_vec))
assert((PopCount(s2_hit_vec) === 1.U) || s2_error, "L2 TLB multi-hit")
}
val s2_pte = Wire(new PTE)
val s2_hit_entry = Mux1H(s2_hit_vec, s2_entry_vec)
s2_pte.ppn := s2_hit_entry.ppn
s2_pte.d := s2_hit_entry.d
s2_pte.a := s2_hit_entry.a
s2_pte.g := Mux1H(s2_hit_vec, s2_g_vec)
s2_pte.u := s2_hit_entry.u
s2_pte.x := s2_hit_entry.x
s2_pte.w := s2_hit_entry.w
s2_pte.r := s2_hit_entry.r
s2_pte.v := true.B
s2_pte.reserved_for_future := 0.U
s2_pte.reserved_for_software := 0.U
for (way <- 0 until coreParams.nL2TLBWays) {
ccover(s2_hit && s2_hit_vec(way), s"L2_TLB_HIT_WAY$way", s"L2 TLB hit way$way")
}
(s2_hit, s2_error, s2_pte, Some(ram))
}
// if SFENCE occurs during walk, don't refill PTE cache or L2 TLB until next walk
invalidated := io.dpath.sfence.valid || (invalidated && state =/= s_ready)
// mem request
io.mem.keep_clock_enabled := false.B
io.mem.req.valid := state === s_req || state === s_dummy1
io.mem.req.bits.phys := true.B
io.mem.req.bits.cmd := M_XRD
io.mem.req.bits.size := log2Ceil(xLen/8).U
io.mem.req.bits.signed := false.B
io.mem.req.bits.addr := pte_addr
io.mem.req.bits.idx.foreach(_ := pte_addr)
io.mem.req.bits.dprv := PRV.S.U // PTW accesses are S-mode by definition
io.mem.req.bits.dv := do_both_stages && !stage2
io.mem.req.bits.tag := DontCare
io.mem.req.bits.no_resp := false.B
io.mem.req.bits.no_alloc := DontCare
io.mem.req.bits.no_xcpt := DontCare
io.mem.req.bits.data := DontCare
io.mem.req.bits.mask := DontCare
io.mem.s1_kill := l2_hit || (state =/= s_wait1) || resp_gf
io.mem.s1_data := DontCare
io.mem.s2_kill := false.B
val pageGranularityPMPs = pmpGranularity >= (1 << pgIdxBits)
require(!usingHypervisor || pageGranularityPMPs, s"hypervisor requires pmpGranularity >= ${1<<pgIdxBits}")
val pmaPgLevelHomogeneous = (0 until pgLevels) map { i =>
val pgSize = BigInt(1) << (pgIdxBits + ((pgLevels - 1 - i) * pgLevelBits))
if (pageGranularityPMPs && i == pgLevels - 1) {
require(TLBPageLookup.homogeneous(edge.manager.managers, pgSize), s"All memory regions must be $pgSize-byte aligned")
true.B
} else {
TLBPageLookup(edge.manager.managers, xLen, p(CacheBlockBytes), pgSize, xLen/8)(r_pte.ppn << pgIdxBits).homogeneous
}
}
val pmaHomogeneous = pmaPgLevelHomogeneous(count)
val pmpHomogeneous = new PMPHomogeneityChecker(io.dpath.pmp).apply(r_pte.ppn << pgIdxBits, count)
val homogeneous = pmaHomogeneous && pmpHomogeneous
// response to tlb
for (i <- 0 until io.requestor.size) {
io.requestor(i).resp.valid := resp_valid(i)
io.requestor(i).resp.bits.ae_ptw := resp_ae_ptw
io.requestor(i).resp.bits.ae_final := resp_ae_final
io.requestor(i).resp.bits.pf := resp_pf
io.requestor(i).resp.bits.gf := resp_gf
io.requestor(i).resp.bits.hr := resp_hr
io.requestor(i).resp.bits.hw := resp_hw
io.requestor(i).resp.bits.hx := resp_hx
io.requestor(i).resp.bits.pte := r_pte
io.requestor(i).resp.bits.level := max_count
io.requestor(i).resp.bits.homogeneous := homogeneous || pageGranularityPMPs.B
io.requestor(i).resp.bits.fragmented_superpage := resp_fragmented_superpage && pageGranularityPMPs.B
io.requestor(i).resp.bits.gpa.valid := r_req.need_gpa
io.requestor(i).resp.bits.gpa.bits :=
Cat(Mux(!stage2_final || !r_req.vstage1 || aux_count === (pgLevels - 1).U, aux_pte.ppn, makeFragmentedSuperpagePPN(aux_pte.ppn)(aux_count)), gpa_pgoff)
io.requestor(i).resp.bits.gpa_is_pte := !stage2_final
io.requestor(i).ptbr := io.dpath.ptbr
io.requestor(i).hgatp := io.dpath.hgatp
io.requestor(i).vsatp := io.dpath.vsatp
io.requestor(i).customCSRs <> io.dpath.customCSRs
io.requestor(i).status := io.dpath.status
io.requestor(i).hstatus := io.dpath.hstatus
io.requestor(i).gstatus := io.dpath.gstatus
io.requestor(i).pmp := io.dpath.pmp
}
// control state machine
val next_state = WireDefault(state)
state := OptimizationBarrier(next_state)
val do_switch = WireDefault(false.B)
switch (state) {
is (s_ready) {
when (arb.io.out.fire) {
val satp_initial_count = pgLevels.U - minPgLevels.U - satp.additionalPgLevels
val vsatp_initial_count = pgLevels.U - minPgLevels.U - io.dpath.vsatp.additionalPgLevels
val hgatp_initial_count = pgLevels.U - minPgLevels.U - io.dpath.hgatp.additionalPgLevels
val aux_ppn = Mux(arb.io.out.bits.bits.vstage1, io.dpath.vsatp.ppn, arb.io.out.bits.bits.addr)
r_req := arb.io.out.bits.bits
r_req_dest := arb.io.chosen
next_state := Mux(arb.io.out.bits.valid, s_req, s_ready)
stage2 := arb.io.out.bits.bits.stage2
stage2_final := arb.io.out.bits.bits.stage2 && !arb.io.out.bits.bits.vstage1
count := Mux(arb.io.out.bits.bits.stage2, hgatp_initial_count, satp_initial_count)
aux_count := Mux(arb.io.out.bits.bits.vstage1, vsatp_initial_count, 0.U)
aux_pte.ppn := aux_ppn
aux_pte.reserved_for_future := 0.U
resp_ae_ptw := false.B
resp_ae_final := false.B
resp_pf := false.B
resp_gf := checkInvalidHypervisorGPA(io.dpath.hgatp, aux_ppn) && arb.io.out.bits.bits.stage2
resp_hr := true.B
resp_hw := true.B
resp_hx := true.B
resp_fragmented_superpage := false.B
r_hgatp := io.dpath.hgatp
assert(!arb.io.out.bits.bits.need_gpa || arb.io.out.bits.bits.stage2)
}
}
is (s_req) {
when(stage2 && count === r_hgatp_initial_count) {
gpa_pgoff := Mux(aux_count === (pgLevels-1).U, r_req.addr << (xLen/8).log2, stage2_pte_cache_addr)
}
// pte_cache hit
when (stage2_pte_cache_hit) {
aux_count := aux_count + 1.U
aux_pte.ppn := stage2_pte_cache_data
aux_pte.reserved_for_future := 0.U
pte_hit := true.B
}.elsewhen (pte_cache_hit) {
count := count + 1.U
pte_hit := true.B
}.otherwise {
next_state := Mux(io.mem.req.ready, s_wait1, s_req)
}
when(resp_gf) {
next_state := s_ready
resp_valid(r_req_dest) := true.B
}
}
is (s_wait1) {
      // This Mux is for the l2_error case; the l2_hit && !l2_error case is overridden below
next_state := Mux(l2_hit, s_req, s_wait2)
}
is (s_wait2) {
next_state := s_wait3
io.dpath.perf.pte_miss := count < (pgLevels-1).U
when (io.mem.s2_xcpt.ae.ld) {
resp_ae_ptw := true.B
next_state := s_ready
resp_valid(r_req_dest) := true.B
}
}
is (s_fragment_superpage) {
next_state := s_ready
resp_valid(r_req_dest) := true.B
when (!homogeneous) {
count := (pgLevels-1).U
resp_fragmented_superpage := true.B
}
when (do_both_stages) {
resp_fragmented_superpage := true.B
}
}
}
val merged_pte = {
val superpage_masks = (0 until pgLevels).map(i => ((BigInt(1) << pte.ppn.getWidth) - (BigInt(1) << (pgLevels-1-i)*pgLevelBits)).U)
val superpage_mask = superpage_masks(Mux(stage2_final, max_count, (pgLevels-1).U))
val stage1_ppns = (0 until pgLevels-1).map(i => Cat(pte.ppn(pte.ppn.getWidth-1, (pgLevels-i-1)*pgLevelBits), aux_pte.ppn((pgLevels-i-1)*pgLevelBits-1,0))) :+ pte.ppn
val stage1_ppn = stage1_ppns(count)
makePTE(stage1_ppn & superpage_mask, aux_pte)
}
r_pte := OptimizationBarrier(
    // l2tlb hit -> a leaf PTE (l2_pte) was found, respond to the L1 TLB
Mux(l2_hit && !l2_error && !resp_gf, l2_pte,
// S2 PTE cache hit -> proceed to the next level of walking, update the r_pte with hgatp
Mux(state === s_req && stage2_pte_cache_hit, makeHypervisorRootPTE(r_hgatp, stage2_pte_cache_data, l2_pte),
    // pte_cache hit -> a non-leaf PTE (pte_cache) was found, continue to request mem
Mux(state === s_req && pte_cache_hit, makePTE(pte_cache_data, l2_pte),
// 2-stage translation
Mux(do_switch, makeHypervisorRootPTE(r_hgatp, pte.ppn, r_pte),
    // when mem responds, store the PTE from mem.resp
Mux(mem_resp_valid, Mux(!traverse && r_req.vstage1 && stage2, merged_pte, pte),
// fragment_superpage
Mux(state === s_fragment_superpage && !homogeneous && count =/= (pgLevels - 1).U, makePTE(makeFragmentedSuperpagePPN(r_pte.ppn)(count), r_pte),
    // when a TLB request arrives -> request mem, using the root address in satp (or vsatp/hgatp)
Mux(arb.io.out.fire, Mux(arb.io.out.bits.bits.stage2, makeHypervisorRootPTE(io.dpath.hgatp, io.dpath.vsatp.ppn, r_pte), makePTE(satp.ppn, r_pte)),
r_pte))))))))
when (l2_hit && !l2_error && !resp_gf) {
assert(state === s_req || state === s_wait1)
next_state := s_ready
resp_valid(r_req_dest) := true.B
count := (pgLevels-1).U
}
when (mem_resp_valid) {
assert(state === s_wait3)
next_state := s_req
when (traverse) {
when (do_both_stages && !stage2) { do_switch := true.B }
count := count + 1.U
}.otherwise {
val gf = (stage2 && !stage2_final && !pte.ur()) || (pte.leaf() && pte.reserved_for_future === 0.U && invalid_gpa)
val ae = pte.v && invalid_paddr
val pf = pte.v && pte.reserved_for_future =/= 0.U
val success = pte.v && !ae && !pf && !gf
when (do_both_stages && !stage2_final && success) {
when (stage2) {
stage2 := false.B
count := aux_count
}.otherwise {
stage2_final := true.B
do_switch := true.B
}
}.otherwise {
        // a leaf PTE was found, start the L2 refill
l2_refill := success && count === (pgLevels-1).U && !r_req.need_gpa &&
(!r_req.vstage1 && !r_req.stage2 ||
do_both_stages && aux_count === (pgLevels-1).U && pte.isFullPerm())
count := max_count
when (pageGranularityPMPs.B && !(count === (pgLevels-1).U && (!do_both_stages || aux_count === (pgLevels-1).U))) {
next_state := s_fragment_superpage
}.otherwise {
next_state := s_ready
resp_valid(r_req_dest) := true.B
}
resp_ae_ptw := ae && count < (pgLevels-1).U && pte.table()
resp_ae_final := ae && pte.leaf()
resp_pf := pf && !stage2
resp_gf := gf || (pf && stage2)
resp_hr := !stage2 || (!pf && !gf && pte.ur())
resp_hw := !stage2 || (!pf && !gf && pte.uw())
resp_hx := !stage2 || (!pf && !gf && pte.ux())
}
}
}
when (io.mem.s2_nack) {
assert(state === s_wait2)
next_state := s_req
}
when (do_switch) {
aux_count := Mux(traverse, count + 1.U, count)
count := r_hgatp_initial_count
aux_pte := Mux(traverse, pte, {
val s1_ppns = (0 until pgLevels-1).map(i => Cat(pte.ppn(pte.ppn.getWidth-1, (pgLevels-i-1)*pgLevelBits), r_req.addr(((pgLevels-i-1)*pgLevelBits min vpnBits)-1,0).padTo((pgLevels-i-1)*pgLevelBits))) :+ pte.ppn
makePTE(s1_ppns(count), pte)
})
stage2 := true.B
}
for (i <- 0 until pgLevels) {
val leaf = mem_resp_valid && !traverse && count === i.U
ccover(leaf && pte.v && !invalid_paddr && !invalid_gpa && pte.reserved_for_future === 0.U, s"L$i", s"successful page-table access, level $i")
ccover(leaf && pte.v && invalid_paddr, s"L${i}_BAD_PPN_MSB", s"PPN too large, level $i")
ccover(leaf && pte.v && invalid_gpa, s"L${i}_BAD_GPA_MSB", s"GPA too large, level $i")
ccover(leaf && pte.v && pte.reserved_for_future =/= 0.U, s"L${i}_BAD_RSV_MSB", s"reserved MSBs set, level $i")
ccover(leaf && !mem_resp_data(0), s"L${i}_INVALID_PTE", s"page not present, level $i")
if (i != pgLevels-1)
ccover(leaf && !pte.v && mem_resp_data(0), s"L${i}_BAD_PPN_LSB", s"PPN LSBs not zero, level $i")
}
ccover(mem_resp_valid && count === (pgLevels-1).U && pte.table(), s"TOO_DEEP", s"page table too deep")
ccover(io.mem.s2_nack, "NACK", "D$ nacked page-table access")
ccover(state === s_wait2 && io.mem.s2_xcpt.ae.ld, "AE", "access exception while walking page table")
} // leaving gated-clock domain
private def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
if (usingVM) property.cover(cond, s"PTW_$label", "MemorySystem;;" + desc)
  /** Replace PTE.ppn with ppn */
private def makePTE(ppn: UInt, default: PTE) = {
val pte = WireDefault(default)
pte.ppn := ppn
pte
}
/** use hgatp and vpn to construct a new ppn */
private def makeHypervisorRootPTE(hgatp: PTBR, vpn: UInt, default: PTE) = {
val count = pgLevels.U - minPgLevels.U - hgatp.additionalPgLevels
val idxs = (0 to pgLevels-minPgLevels).map(i => (vpn >> (pgLevels-i)*pgLevelBits))
val lsbs = WireDefault(UInt(maxHypervisorExtraAddrBits.W), idxs(count))
val pte = WireDefault(default)
pte.ppn := Cat(hgatp.ppn >> maxHypervisorExtraAddrBits, lsbs)
pte
}
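  // A minimal worked example of the construction above, assuming Sv39x4
  // (pgLevels = minPgLevels = 3, additionalPgLevels = 0, so count = 0, and
  // maxHypervisorExtraAddrBits = 2):
  //   lsbs    = (vpn >> 27)(1, 0)          // the two GPA VPN bits above the normal 27-bit Sv39 VPN
  //   pte.ppn = Cat(hgatp.ppn >> 2, lsbs)
  // so the extra GPA bits select one of the four 4 KiB quarters of the
  // 16 KiB-aligned stage-2 root page table pointed to by hgatp.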
/** use hgatp and vpn to check for gpa out of range */
private def checkInvalidHypervisorGPA(hgatp: PTBR, vpn: UInt) = {
val count = pgLevels.U - minPgLevels.U - hgatp.additionalPgLevels
val idxs = (0 to pgLevels-minPgLevels).map(i => (vpn >> ((pgLevels-i)*pgLevelBits)+maxHypervisorExtraAddrBits))
idxs.extract(count) =/= 0.U
}
}
/** Mix-ins for constructing tiles that might have a PTW */
trait CanHavePTW extends HasTileParameters with HasHellaCache { this: BaseTile =>
val module: CanHavePTWModule
var nPTWPorts = 1
nDCachePorts += usingPTW.toInt
}
trait CanHavePTWModule extends HasHellaCacheModule {
val outer: CanHavePTW
val ptwPorts = ListBuffer(outer.dcache.module.io.ptw)
val ptw = Module(new PTW(outer.nPTWPorts)(outer.dcache.node.edges.out(0), outer.p))
ptw.io.mem <> DontCare
if (outer.usingPTW) {
dcachePorts += ptw.io.mem
}
}
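/* A minimal sketch (the accelerator names are hypothetical) of how an additional TLB
 * client could claim a PTW port: bump nPTWPorts in the outer trait and append its
 * TLBPTWIO to ptwPorts in the module trait.
 * {{{
 * trait HasMyAccel extends CanHavePTW { this: BaseTile =>
 *   nPTWPorts += 1
 * }
 * trait HasMyAccelModule extends CanHavePTWModule {
 *   ptwPorts += myAccel.module.io.ptw  // a TLBPTWIO; myAccel is hypothetical
 * }
 * }}}
 */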
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
 * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
 *   Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
 *   Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
 *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
 *   Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 *   It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
 *   Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
 *   describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
 *   Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
 *   Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extends from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
 *   - line `─`: the source is processed by a function and the generated value is passed to others
 *   - Arrow `→`: the target of the arrow is generated by the source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
   * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
   * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
* edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
      // Resolving the stars relies on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
   * Each element is: `j` Port index of this binding in the connected [[InwardNode]]'s iPortMapping on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
      // query the range of port indices that this binding occupies on the node at the other side of the connection.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: DontCare unconnected forwarded diplomatic signals for compatibility reasons.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: DontCare unconnected forwarded diplomatic signals for compatibility reasons.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
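/* A minimal usage sketch (names illustrative): inside a LazyModuleImp, once instantiation
 * of the parent module has begun, the negotiated ports of a node can be walked as
 * {{{
 * node.out.foreach { case (bundle, edge) =>
 *   // `bundle` is the hardware interface (BO), `edge` the negotiated edge parameters (EO)
 * }
 * }}}
 */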
File HierarchicalElement.scala:
package freechips.rocketchip.subsystem
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.devices.debug.TLDebugModule
import freechips.rocketchip.diplomacy.{BufferParams}
import freechips.rocketchip.interrupts.IntXbar
import freechips.rocketchip.prci.{ClockSinkParameters, ResetCrossingType, ClockCrossingType}
import freechips.rocketchip.tile.{LookupByHartIdImpl, TraceBundle}
import freechips.rocketchip.tilelink.{TLNode, TLIdentityNode, TLXbar, TLBuffer, TLInwardNode, TLOutwardNode}
trait HierarchicalElementParams {
  val baseName: String // duplicated instances should share a base name
val uniqueName: String
val clockSinkParams: ClockSinkParameters
}
abstract class InstantiableHierarchicalElementParams[ElementType <: BaseHierarchicalElement] extends HierarchicalElementParams
/** An interface for describing the parameterization of how HierarchicalElements are connected to interconnects */
trait HierarchicalElementCrossingParamsLike {
/** The type of clock crossing that should be inserted at the element boundary. */
def crossingType: ClockCrossingType
/** Parameters describing the contents and behavior of the point where the element is attached as an interconnect master. */
def master: HierarchicalElementPortParamsLike
/** Parameters describing the contents and behavior of the point where the element is attached as an interconnect slave. */
def slave: HierarchicalElementPortParamsLike
/** The subnetwork location of the device selecting the apparent base address of MMIO devices inside the element */
def mmioBaseAddressPrefixWhere: TLBusWrapperLocation
/** Inject a reset management subgraph that effects the element child reset only */
def resetCrossingType: ResetCrossingType
/** Keep the element clock separate from the interconnect clock (e.g. even if they are synchronous to one another) */
def forceSeparateClockReset: Boolean
}
/** An interface for describing the parameterization of how a particular element port is connected to an interconnect */
trait HierarchicalElementPortParamsLike {
/** The subnetwork location of the interconnect to which this element port should be connected. */
def where: TLBusWrapperLocation
/** Allows port-specific adapters to be injected into the interconnect side of the attachment point. */
def injectNode(context: Attachable)(implicit p: Parameters): TLNode
}
abstract class BaseHierarchicalElement (val crossing: ClockCrossingType)(implicit p: Parameters)
extends LazyModule()(p)
with CrossesToOnlyOneClockDomain
{
def module: BaseHierarchicalElementModuleImp[BaseHierarchicalElement]
protected val tlOtherMastersNode = TLIdentityNode()
protected val tlMasterXbar = LazyModule(new TLXbar(nameSuffix = Some(s"MasterXbar_$desiredName")))
protected val tlSlaveXbar = LazyModule(new TLXbar(nameSuffix = Some(s"SlaveXbar_$desiredName")))
protected val intXbar = LazyModule(new IntXbar)
def masterNode: TLOutwardNode
def slaveNode: TLInwardNode
/** Helper function to insert additional buffers on master ports at the boundary of the tile.
*
* The boundary buffering needed to cut feed-through paths is
* microarchitecture specific, so this may need to be overridden
* in subclasses of this class.
*/
def makeMasterBoundaryBuffers(crossing: ClockCrossingType)(implicit p: Parameters) = TLBuffer(BufferParams.none)
/** Helper function to insert additional buffers on slave ports at the boundary of the tile.
*
* The boundary buffering needed to cut feed-through paths is
* microarchitecture specific, so this may need to be overridden
* in subclasses of this class.
*/
def makeSlaveBoundaryBuffers(crossing: ClockCrossingType)(implicit p: Parameters) = TLBuffer(BufferParams.none)
}
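/* A minimal sketch of how a concrete element might override the boundary buffering;
 * the class name and the BufferParams.flow choice are illustrative only.
 * {{{
 * class MyElement(crossing: ClockCrossingType)(implicit p: Parameters)
 *     extends BaseHierarchicalElement(crossing)(p) {
 *   override def makeMasterBoundaryBuffers(crossing: ClockCrossingType)(implicit p: Parameters) =
 *     TLBuffer(BufferParams.flow)
 *   // module, masterNode and slaveNode still need to be defined
 * }
 * }}}
 */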
abstract class BaseHierarchicalElementModuleImp[+L <: BaseHierarchicalElement](val outer: L) extends LazyModuleImp(outer)
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
      // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
      // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
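  // For example, if the manager supports only Get and Hint (no Puts or atomics),
  // aDataYes is false, so staticHasData returns Some(false) for channel A and
  // hasData below statically folds to false.B for A messages.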
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
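  // Beat bookkeeping: numBeats1 returns (number of beats - 1) for a message, and
  // firstlastHelper below drives a down-counter from it to derive the first/last/done
  // flags and a running beat index. Worked example (illustrative, assuming beatBytes = 8):
  // a 32-byte PutFullData has size = 5, so numBeats1 = 3; the counter takes the values
  // 0, 3, 2, 1 across the four beats, `first` is true only on the first beat, `last` on
  // the fourth, and `count` advances 0, 1, 2, 3.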
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
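  // For example, Get and PREFETCH_READ hints do not need T permissions, Puts, AMOs and
  // PREFETCH_WRITE hints do, and Acquires follow their grow parameter
  // (NtoB -> false, NtoT/BtoT -> true).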
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
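  // Illustrative use only (the surrounding names are assumptions, not part of this API):
  //   val (inflight, _) = edge.inFlight(tlBundle)
  //   assert(inflight <= maxInFlight.U, "too many outstanding TileLink transactions")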
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
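  // Sketch of how the (legal, bits) pair above is typically consumed (names other than
  // TLPermissions.NtoT are assumptions for illustration): gate valid on `legal` and drive
  // the A channel with the returned bits, e.g.
  //   val (legal, acq) = edge.AcquireBlock(srcId, blockAddr, lgBlockBytes.U, TLPermissions.NtoT)
  //   tl.a.valid := wantAcquire && legal
  //   tl.a.bits := acq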
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
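// TLEdgeOut above constructs the client-driven channels (A requests, C responses, E acks);
// TLEdgeIn below constructs the manager-driven channels (B requests, D responses).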
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
File RocketTile.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.tile
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.devices.tilelink.{BasicBusBlockerParams, BasicBusBlocker}
import freechips.rocketchip.diplomacy.{
AddressSet, DisableMonitors, BufferParams
}
import freechips.rocketchip.resources.{
SimpleDevice, Description,
ResourceAnchors, ResourceBindings, ResourceBinding, Resource, ResourceAddress,
}
import freechips.rocketchip.interrupts.IntIdentityNode
import freechips.rocketchip.tilelink.{TLIdentityNode, TLBuffer}
import freechips.rocketchip.rocket.{
RocketCoreParams, ICacheParams, DCacheParams, BTBParams, HasHellaCache,
HasICacheFrontend, ScratchpadSlavePort, HasICacheFrontendModule, Rocket
}
import freechips.rocketchip.subsystem.HierarchicalElementCrossingParamsLike
import freechips.rocketchip.prci.{ClockSinkParameters, RationalCrossing, ClockCrossingType}
import freechips.rocketchip.util.{Annotated, InOrderArbiter}
import freechips.rocketchip.util.BooleanToAugmentedBoolean
case class RocketTileBoundaryBufferParams(force: Boolean = false)
case class RocketTileParams(
core: RocketCoreParams = RocketCoreParams(),
icache: Option[ICacheParams] = Some(ICacheParams()),
dcache: Option[DCacheParams] = Some(DCacheParams()),
btb: Option[BTBParams] = Some(BTBParams()),
dataScratchpadBytes: Int = 0,
tileId: Int = 0,
beuAddr: Option[BigInt] = None,
blockerCtrlAddr: Option[BigInt] = None,
clockSinkParams: ClockSinkParameters = ClockSinkParameters(),
boundaryBuffers: Option[RocketTileBoundaryBufferParams] = None
) extends InstantiableTileParams[RocketTile] {
require(icache.isDefined)
require(dcache.isDefined)
val baseName = "rockettile"
val uniqueName = s"${baseName}_$tileId"
def instantiate(crossing: HierarchicalElementCrossingParamsLike, lookup: LookupByHartIdImpl)(implicit p: Parameters): RocketTile = {
new RocketTile(this, crossing, lookup)
}
}
class RocketTile private(
val rocketParams: RocketTileParams,
crossing: ClockCrossingType,
lookup: LookupByHartIdImpl,
q: Parameters)
extends BaseTile(rocketParams, crossing, lookup, q)
with SinksExternalInterrupts
with SourcesExternalNotifications
with HasLazyRoCC // implies CanHaveSharedFPU with CanHavePTW with HasHellaCache
with HasHellaCache
with HasICacheFrontend
{
// Private constructor ensures altered LazyModule.p is used implicitly
def this(params: RocketTileParams, crossing: HierarchicalElementCrossingParamsLike, lookup: LookupByHartIdImpl)(implicit p: Parameters) =
this(params, crossing.crossingType, lookup, p)
val intOutwardNode = rocketParams.beuAddr map { _ => IntIdentityNode() }
val slaveNode = TLIdentityNode()
val masterNode = visibilityNode
val dtim_adapter = tileParams.dcache.flatMap { d => d.scratch.map { s =>
LazyModule(new ScratchpadSlavePort(AddressSet.misaligned(s, d.dataScratchpadBytes), lazyCoreParamsView.coreDataBytes, tileParams.core.useAtomics && !tileParams.core.useAtomicsOnlyForIO))
}}
dtim_adapter.foreach(lm => connectTLSlave(lm.node, lm.node.portParams.head.beatBytes))
val bus_error_unit = rocketParams.beuAddr map { a =>
val beu = LazyModule(new BusErrorUnit(new L1BusErrors, BusErrorUnitParams(a), xLen/8))
intOutwardNode.get := beu.intNode
connectTLSlave(beu.node, xBytes)
beu
}
val tile_master_blocker =
tileParams.blockerCtrlAddr
.map(BasicBusBlockerParams(_, xBytes, masterPortBeatBytes, deadlock = true))
.map(bp => LazyModule(new BasicBusBlocker(bp)))
tile_master_blocker.foreach(lm => connectTLSlave(lm.controlNode, xBytes))
// TODO: this doesn't block other masters, e.g. RoCCs
tlOtherMastersNode := tile_master_blocker.map { _.node := tlMasterXbar.node } getOrElse { tlMasterXbar.node }
masterNode :=* tlOtherMastersNode
DisableMonitors { implicit p => tlSlaveXbar.node :*= slaveNode }
nDCachePorts += 1 /*core */ + (dtim_adapter.isDefined).toInt + rocketParams.core.vector.map(_.useDCache.toInt).getOrElse(0)
val dtimProperty = dtim_adapter.map(d => Map(
"sifive,dtim" -> d.device.asProperty)).getOrElse(Nil)
val itimProperty = frontend.icache.itimProperty.toSeq.flatMap(p => Map("sifive,itim" -> p))
val beuProperty = bus_error_unit.map(d => Map(
"sifive,buserror" -> d.device.asProperty)).getOrElse(Nil)
val cpuDevice: SimpleDevice = new SimpleDevice("cpu", Seq("sifive,rocket0", "riscv")) {
override def parent = Some(ResourceAnchors.cpus)
override def describe(resources: ResourceBindings): Description = {
val Description(name, mapping) = super.describe(resources)
Description(name, mapping ++ cpuProperties ++ nextLevelCacheProperty
++ tileProperties ++ dtimProperty ++ itimProperty ++ beuProperty)
}
}
val vector_unit = rocketParams.core.vector.map(v => LazyModule(v.build(p)))
vector_unit.foreach(vu => tlMasterXbar.node :=* vu.atlNode)
vector_unit.foreach(vu => tlOtherMastersNode :=* vu.tlNode)
ResourceBinding {
Resource(cpuDevice, "reg").bind(ResourceAddress(tileId))
}
override lazy val module = new RocketTileModuleImp(this)
override def makeMasterBoundaryBuffers(crossing: ClockCrossingType)(implicit p: Parameters) = (rocketParams.boundaryBuffers, crossing) match {
case (Some(RocketTileBoundaryBufferParams(true )), _) => TLBuffer()
case (Some(RocketTileBoundaryBufferParams(false)), _: RationalCrossing) => TLBuffer(BufferParams.none, BufferParams.flow, BufferParams.none, BufferParams.flow, BufferParams(1))
case _ => TLBuffer(BufferParams.none)
}
override def makeSlaveBoundaryBuffers(crossing: ClockCrossingType)(implicit p: Parameters) = (rocketParams.boundaryBuffers, crossing) match {
case (Some(RocketTileBoundaryBufferParams(true )), _) => TLBuffer()
case (Some(RocketTileBoundaryBufferParams(false)), _: RationalCrossing) => TLBuffer(BufferParams.flow, BufferParams.none, BufferParams.none, BufferParams.none, BufferParams.none)
case _ => TLBuffer(BufferParams.none)
}
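  // The two helpers above optionally insert TLBuffers at the tile's master and slave
  // boundaries: full buffers when forced, crossing-specific flow buffers for rational
  // clock crossings, and pass-through (BufferParams.none) otherwise.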
}
class RocketTileModuleImp(outer: RocketTile) extends BaseTileModuleImp(outer)
with HasFpuOpt
with HasLazyRoCCModule
with HasICacheFrontendModule {
Annotated.params(this, outer.rocketParams)
val core = Module(new Rocket(outer)(outer.p))
outer.vector_unit.foreach { v =>
core.io.vector.get <> v.module.io.core
v.module.io.tlb <> outer.dcache.module.io.tlb_port
}
// reset vector is connected in the Frontend to s2_pc
core.io.reset_vector := DontCare
// Report unrecoverable error conditions; for now the only cause is cache ECC errors
outer.reportHalt(List(outer.dcache.module.io.errors))
// Report when the tile has ceased to retire instructions; for now the only cause is clock gating
outer.reportCease(outer.rocketParams.core.clockGate.option(
!outer.dcache.module.io.cpu.clock_enabled &&
!outer.frontend.module.io.cpu.clock_enabled &&
!ptw.io.dpath.clock_enabled &&
core.io.cease))
outer.reportWFI(Some(core.io.wfi))
outer.decodeCoreInterrupts(core.io.interrupts) // Decode the interrupt vector
outer.bus_error_unit.foreach { beu =>
core.io.interrupts.buserror.get := beu.module.io.interrupt
beu.module.io.errors.dcache := outer.dcache.module.io.errors
beu.module.io.errors.icache := outer.frontend.module.io.errors
}
core.io.interrupts.nmi.foreach { nmi => nmi := outer.nmiSinkNode.get.bundle }
// Pass through various external constants and reports that were bundle-bridged into the tile
outer.traceSourceNode.bundle <> core.io.trace
core.io.traceStall := outer.traceAuxSinkNode.bundle.stall
outer.bpwatchSourceNode.bundle <> core.io.bpwatch
core.io.hartid := outer.hartIdSinkNode.bundle
require(core.io.hartid.getWidth >= outer.hartIdSinkNode.bundle.getWidth,
s"core hartid wire (${core.io.hartid.getWidth}b) truncates external hartid wire (${outer.hartIdSinkNode.bundle.getWidth}b)")
// Connect the core pipeline to other intra-tile modules
outer.frontend.module.io.cpu <> core.io.imem
dcachePorts += core.io.dmem // TODO outer.dcachePorts += () => module.core.io.dmem ??
fpuOpt foreach { fpu =>
core.io.fpu :<>= fpu.io.waiveAs[FPUCoreIO](_.cp_req, _.cp_resp)
}
if (fpuOpt.isEmpty) {
core.io.fpu := DontCare
}
outer.vector_unit foreach { v => if (outer.rocketParams.core.vector.get.useDCache) {
dcachePorts += v.module.io.dmem
} else {
v.module.io.dmem := DontCare
} }
core.io.ptw <> ptw.io.dpath
// Connect the coprocessor interfaces
if (outer.roccs.size > 0) {
cmdRouter.get.io.in <> core.io.rocc.cmd
outer.roccs.foreach{ lm =>
lm.module.io.exception := core.io.rocc.exception
lm.module.io.fpu_req.ready := DontCare
lm.module.io.fpu_resp.valid := DontCare
lm.module.io.fpu_resp.bits.data := DontCare
lm.module.io.fpu_resp.bits.exc := DontCare
}
core.io.rocc.resp <> respArb.get.io.out
core.io.rocc.busy <> (cmdRouter.get.io.busy || outer.roccs.map(_.module.io.busy).reduce(_ || _))
core.io.rocc.interrupt := outer.roccs.map(_.module.io.interrupt).reduce(_ || _)
(core.io.rocc.csrs zip roccCSRIOs.flatten).foreach { t => t._2 <> t._1 }
} else {
// tie off
core.io.rocc.cmd.ready := false.B
core.io.rocc.resp.valid := false.B
core.io.rocc.resp.bits := DontCare
core.io.rocc.busy := DontCare
core.io.rocc.interrupt := DontCare
}
  // Don't care about mem, since not all RoCCs need to access memory
core.io.rocc.mem := DontCare
  // Rocket has higher-priority access to the DTIM than other TileLink clients
outer.dtim_adapter.foreach { lm => dcachePorts += lm.module.io.dmem }
// TODO eliminate this redundancy
val h = dcachePorts.size
val c = core.dcacheArbPorts
val o = outer.nDCachePorts
require(h == c, s"port list size was $h, core expected $c")
require(h == o, s"port list size was $h, outer counted $o")
// TODO figure out how to move the below into their respective mix-ins
dcacheArb.io.requestor <> dcachePorts.toSeq
ptw.io.requestor <> ptwPorts.toSeq
}
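// HasFpuOpt optionally instantiates the FPU; when RoCC accelerators or the vector unit also
// need floating-point access, their coprocessor requests are funneled through a shared
// InOrderArbiter in front of the FPU's cp_req/cp_resp ports.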
trait HasFpuOpt { this: RocketTileModuleImp =>
val fpuOpt = outer.tileParams.core.fpu.map(params => Module(new FPU(params)(outer.p)))
fpuOpt.foreach { fpu =>
val nRoCCFPUPorts = outer.roccs.count(_.usesFPU)
val nFPUPorts = nRoCCFPUPorts + outer.rocketParams.core.useVector.toInt
if (nFPUPorts > 0) {
val fpArb = Module(new InOrderArbiter(new FPInput()(outer.p), new FPResult()(outer.p), nFPUPorts))
fpu.io.cp_req <> fpArb.io.out_req
fpArb.io.out_resp <> fpu.io.cp_resp
val fp_rocc_ios = outer.roccs.filter(_.usesFPU).map(_.module.io)
for (i <- 0 until nRoCCFPUPorts) {
fpArb.io.in_req(i) <> fp_rocc_ios(i).fpu_req
fp_rocc_ios(i).fpu_resp <> fpArb.io.in_resp(i)
}
outer.vector_unit.foreach(vu => {
fpArb.io.in_req(nRoCCFPUPorts) <> vu.module.io.fp_req
vu.module.io.fp_resp <> fpArb.io.in_resp(nRoCCFPUPorts)
})
} else {
fpu.io.cp_req.valid := false.B
fpu.io.cp_req.bits := DontCare
fpu.io.cp_resp.ready := false.B
}
}
}
File BundleBridgeNexus.scala:
package org.chipsalliance.diplomacy.bundlebridge
import chisel3.{chiselTypeOf, ActualDirection, Data, Reg}
import chisel3.reflect.DataMirror
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyRawModuleImp}
class BundleBridgeNexus[T <: Data](
inputFn: Seq[T] => T,
outputFn: (T, Int) => Seq[T],
default: Option[() => T] = None,
inputRequiresOutput: Boolean = false,
override val shouldBeInlined: Boolean = true
)(
implicit p: Parameters)
extends LazyModule {
val node = BundleBridgeNexusNode[T](default, inputRequiresOutput)
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val defaultWireOpt = default.map(_())
val inputs: Seq[T] = node.in.map(_._1)
inputs.foreach { i =>
require(
DataMirror.checkTypeEquivalence(i, inputs.head),
s"${node.context} requires all inputs have equivalent Chisel Data types, but got\n$i\nvs\n${inputs.head}"
)
}
inputs.flatMap(getElements).foreach { elt =>
DataMirror.directionOf(elt) match {
case ActualDirection.Output => ()
case ActualDirection.Unspecified => ()
case _ => require(false, s"${node.context} can only be used with Output-directed Bundles")
}
}
val outputs: Seq[T] =
if (node.out.size > 0) {
val broadcast: T = if (inputs.size >= 1) inputFn(inputs) else defaultWireOpt.get
outputFn(broadcast, node.out.size)
} else { Nil }
val typeName = outputs.headOption.map(_.typeName).getOrElse("NoOutput")
override def desiredName = s"BundleBridgeNexus_$typeName"
node.out.map(_._1).foreach { o =>
require(
DataMirror.checkTypeEquivalence(o, outputs.head),
s"${node.context} requires all outputs have equivalent Chisel Data types, but got\n$o\nvs\n${outputs.head}"
)
}
require(
outputs.size == node.out.size,
s"${node.context} outputFn must generate one output wire per edgeOut, but got ${outputs.size} vs ${node.out.size}"
)
node.out.zip(outputs).foreach { case ((out, _), bcast) => out := bcast }
}
}
object BundleBridgeNexus {
def safeRegNext[T <: Data](x: T): T = {
val reg = Reg(chiselTypeOf(x))
reg := x
reg
}
def requireOne[T <: Data](registered: Boolean)(seq: Seq[T]): T = {
require(seq.size == 1, "BundleBroadcast default requires one input")
if (registered) safeRegNext(seq.head) else seq.head
}
def orReduction[T <: Data](registered: Boolean)(seq: Seq[T]): T = {
val x = seq.reduce((a, b) => (a.asUInt | b.asUInt).asTypeOf(seq.head))
if (registered) safeRegNext(x) else x
}
def fillN[T <: Data](registered: Boolean)(x: T, n: Int): Seq[T] = Seq.fill(n) {
if (registered) safeRegNext(x) else x
}
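  // Sketch of typical use of the apply factory below (the node names are assumptions for
  // illustration, not defined in this file): broadcast a single driver to several sinks, e.g.
  //   val hartIdNexus = BundleBridgeNexus[UInt](inputFn = BundleBridgeNexus.requireOne[UInt](false) _)
  //   sinkNodeA := hartIdNexus
  //   sinkNodeB := hartIdNexus
  //   hartIdNexus := sourceNode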
def apply[T <: Data](
inputFn: Seq[T] => T = orReduction[T](false) _,
outputFn: (T, Int) => Seq[T] = fillN[T](false) _,
default: Option[() => T] = None,
inputRequiresOutput: Boolean = false,
shouldBeInlined: Boolean = true
)(
implicit p: Parameters
): BundleBridgeNexusNode[T] = {
val nexus = LazyModule(new BundleBridgeNexus[T](inputFn, outputFn, default, inputRequiresOutput, shouldBeInlined))
nexus.node
}
}
File Xbar.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressDecoder, AddressSet, RegionType, IdRange, TriStateValue}
import freechips.rocketchip.util.{BundleField, IdentityModule}
// Trades off slave port proximity against routing resource cost
object ForceFanout
{
def apply[T](
a: TriStateValue = TriStateValue.unset,
b: TriStateValue = TriStateValue.unset,
c: TriStateValue = TriStateValue.unset,
d: TriStateValue = TriStateValue.unset,
e: TriStateValue = TriStateValue.unset)(body: Parameters => T)(implicit p: Parameters) =
{
body(p.alterPartial {
case ForceFanoutKey => p(ForceFanoutKey) match {
case ForceFanoutParams(pa, pb, pc, pd, pe) =>
ForceFanoutParams(a.update(pa), b.update(pb), c.update(pc), d.update(pd), e.update(pe))
}
})
}
}
private case class ForceFanoutParams(a: Boolean, b: Boolean, c: Boolean, d: Boolean, e: Boolean)
private case object ForceFanoutKey extends Field(ForceFanoutParams(false, false, false, false, false))
class TLXbar(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule
{
val node = new TLNexusNode(
clientFn = { seq =>
seq(0).v1copy(
echoFields = BundleField.union(seq.flatMap(_.echoFields)),
requestFields = BundleField.union(seq.flatMap(_.requestFields)),
responseKeys = seq.flatMap(_.responseKeys).distinct,
minLatency = seq.map(_.minLatency).min,
clients = (TLXbar.mapInputIds(seq) zip seq) flatMap { case (range, port) =>
port.clients map { client => client.v1copy(
sourceId = client.sourceId.shift(range.start)
)}
}
)
},
managerFn = { seq =>
val fifoIdFactory = TLXbar.relabeler()
seq(0).v1copy(
responseFields = BundleField.union(seq.flatMap(_.responseFields)),
requestKeys = seq.flatMap(_.requestKeys).distinct,
minLatency = seq.map(_.minLatency).min,
endSinkId = TLXbar.mapOutputIds(seq).map(_.end).max,
managers = seq.flatMap { port =>
require (port.beatBytes == seq(0).beatBytes,
s"Xbar ($name with parent $parent) data widths don't match: ${port.managers.map(_.name)} has ${port.beatBytes}B vs ${seq(0).managers.map(_.name)} has ${seq(0).beatBytes}B")
val fifoIdMapper = fifoIdFactory()
port.managers map { manager => manager.v1copy(
fifoId = manager.fifoId.map(fifoIdMapper(_))
)}
}
)
}
){
override def circuitIdentity = outputs.size == 1 && inputs.size == 1
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
if ((node.in.size * node.out.size) > (8*32)) {
println (s"!!! WARNING !!!")
println (s" Your TLXbar ($name with parent $parent) is very large, with ${node.in.size} Masters and ${node.out.size} Slaves.")
println (s"!!! WARNING !!!")
}
val wide_bundle = TLBundleParameters.union((node.in ++ node.out).map(_._2.bundle))
override def desiredName = (Seq("TLXbar") ++ nameSuffix ++ Seq(s"i${node.in.size}_o${node.out.size}_${wide_bundle.shortName}")).mkString("_")
TLXbar.circuit(policy, node.in, node.out)
}
}
object TLXbar
{
def mapInputIds(ports: Seq[TLMasterPortParameters]) = assignRanges(ports.map(_.endSourceId))
def mapOutputIds(ports: Seq[TLSlavePortParameters]) = assignRanges(ports.map(_.endSinkId))
def assignRanges(sizes: Seq[Int]) = {
val pow2Sizes = sizes.map { z => if (z == 0) 0 else 1 << log2Ceil(z) }
val tuples = pow2Sizes.zipWithIndex.sortBy(_._1) // record old index, then sort by increasing size
val starts = tuples.scanRight(0)(_._1 + _).tail // suffix-sum of the sizes = the start positions
val ranges = (tuples zip starts) map { case ((sz, i), st) =>
(if (sz == 0) IdRange(0, 0) else IdRange(st, st + sz), i)
}
    ranges.sortBy(_._2).map(_._1) // Restore original order
}
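  // Worked example (for illustration): endSourceIds Seq(3, 1, 4) round up to power-of-two
  // sizes Seq(4, 1, 4); after sorting by size and suffix-summing the start positions, the
  // three inputs receive IdRange(4, 8), IdRange(8, 9) and IdRange(0, 4) respectively, i.e.
  // aligned, non-overlapping windows returned in the original port order.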
def relabeler() = {
var idFactory = 0
() => {
val fifoMap = scala.collection.mutable.HashMap.empty[Int, Int]
(x: Int) => {
if (fifoMap.contains(x)) fifoMap(x) else {
val out = idFactory
idFactory = idFactory + 1
fifoMap += (x -> out)
out
}
}
}
}
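  // Each relabeler() call yields an independent mapping; within one mapping, every distinct
  // incoming fifoId is renumbered to a fresh globally unique id, so FIFO domains from
  // different slave ports are never accidentally merged.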
  def circuit(policy: TLArbiter.Policy, seqIn: Seq[(TLBundle, TLEdge)], seqOut: Seq[(TLBundle, TLEdge)]): Unit = {
val (io_in, edgesIn) = seqIn.unzip
val (io_out, edgesOut) = seqOut.unzip
// Not every master need connect to every slave on every channel; determine which connections are necessary
val reachableIO = edgesIn.map { cp => edgesOut.map { mp =>
cp.client.clients.exists { c => mp.manager.managers.exists { m =>
c.visibility.exists { ca => m.address.exists { ma =>
ca.overlaps(ma)}}}}
}.toVector}.toVector
val probeIO = (edgesIn zip reachableIO).map { case (cp, reachableO) =>
(edgesOut zip reachableO).map { case (mp, reachable) =>
reachable && cp.client.anySupportProbe && mp.manager.managers.exists(_.regionType >= RegionType.TRACKED)
}.toVector}.toVector
val releaseIO = (edgesIn zip reachableIO).map { case (cp, reachableO) =>
(edgesOut zip reachableO).map { case (mp, reachable) =>
reachable && cp.client.anySupportProbe && mp.manager.anySupportAcquireB
}.toVector}.toVector
val connectAIO = reachableIO
val connectBIO = probeIO
val connectCIO = releaseIO
val connectDIO = reachableIO
val connectEIO = releaseIO
def transpose[T](x: Seq[Seq[T]]) = if (x.isEmpty) Nil else Vector.tabulate(x(0).size) { i => Vector.tabulate(x.size) { j => x(j)(i) } }
val connectAOI = transpose(connectAIO)
val connectBOI = transpose(connectBIO)
val connectCOI = transpose(connectCIO)
val connectDOI = transpose(connectDIO)
val connectEOI = transpose(connectEIO)
// Grab the port ID mapping
val inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client))
val outputIdRanges = TLXbar.mapOutputIds(edgesOut.map(_.manager))
// We need an intermediate size of bundle with the widest possible identifiers
val wide_bundle = TLBundleParameters.union(io_in.map(_.params) ++ io_out.map(_.params))
// Handle size = 1 gracefully (Chisel3 empty range is broken)
def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0)
// Transform input bundle sources (sinks use global namespace on both sides)
val in = Wire(Vec(io_in.size, TLBundle(wide_bundle)))
for (i <- 0 until in.size) {
val r = inputIdRanges(i)
if (connectAIO(i).exists(x=>x)) {
in(i).a.bits.user := DontCare
in(i).a.squeezeAll.waiveAll :<>= io_in(i).a.squeezeAll.waiveAll
in(i).a.bits.source := io_in(i).a.bits.source | r.start.U
} else {
in(i).a := DontCare
io_in(i).a := DontCare
in(i).a.valid := false.B
io_in(i).a.ready := true.B
}
if (connectBIO(i).exists(x=>x)) {
io_in(i).b.squeezeAll :<>= in(i).b.squeezeAll
io_in(i).b.bits.source := trim(in(i).b.bits.source, r.size)
} else {
in(i).b := DontCare
io_in(i).b := DontCare
in(i).b.ready := true.B
io_in(i).b.valid := false.B
}
if (connectCIO(i).exists(x=>x)) {
in(i).c.bits.user := DontCare
in(i).c.squeezeAll.waiveAll :<>= io_in(i).c.squeezeAll.waiveAll
in(i).c.bits.source := io_in(i).c.bits.source | r.start.U
} else {
in(i).c := DontCare
io_in(i).c := DontCare
in(i).c.valid := false.B
io_in(i).c.ready := true.B
}
if (connectDIO(i).exists(x=>x)) {
io_in(i).d.squeezeAll.waiveAll :<>= in(i).d.squeezeAll.waiveAll
io_in(i).d.bits.source := trim(in(i).d.bits.source, r.size)
} else {
in(i).d := DontCare
io_in(i).d := DontCare
in(i).d.ready := true.B
io_in(i).d.valid := false.B
}
if (connectEIO(i).exists(x=>x)) {
in(i).e.squeezeAll :<>= io_in(i).e.squeezeAll
} else {
in(i).e := DontCare
io_in(i).e := DontCare
in(i).e.valid := false.B
io_in(i).e.ready := true.B
}
}
// Transform output bundle sinks (sources use global namespace on both sides)
val out = Wire(Vec(io_out.size, TLBundle(wide_bundle)))
for (o <- 0 until out.size) {
val r = outputIdRanges(o)
if (connectAOI(o).exists(x=>x)) {
out(o).a.bits.user := DontCare
io_out(o).a.squeezeAll.waiveAll :<>= out(o).a.squeezeAll.waiveAll
} else {
out(o).a := DontCare
io_out(o).a := DontCare
out(o).a.ready := true.B
io_out(o).a.valid := false.B
}
if (connectBOI(o).exists(x=>x)) {
out(o).b.squeezeAll :<>= io_out(o).b.squeezeAll
} else {
out(o).b := DontCare
io_out(o).b := DontCare
out(o).b.valid := false.B
io_out(o).b.ready := true.B
}
if (connectCOI(o).exists(x=>x)) {
out(o).c.bits.user := DontCare
io_out(o).c.squeezeAll.waiveAll :<>= out(o).c.squeezeAll.waiveAll
} else {
out(o).c := DontCare
io_out(o).c := DontCare
out(o).c.ready := true.B
io_out(o).c.valid := false.B
}
if (connectDOI(o).exists(x=>x)) {
out(o).d.squeezeAll :<>= io_out(o).d.squeezeAll
out(o).d.bits.sink := io_out(o).d.bits.sink | r.start.U
} else {
out(o).d := DontCare
io_out(o).d := DontCare
out(o).d.valid := false.B
io_out(o).d.ready := true.B
}
if (connectEOI(o).exists(x=>x)) {
io_out(o).e.squeezeAll :<>= out(o).e.squeezeAll
io_out(o).e.bits.sink := trim(out(o).e.bits.sink, r.size)
} else {
out(o).e := DontCare
io_out(o).e := DontCare
out(o).e.ready := true.B
io_out(o).e.valid := false.B
}
}
// Filter a list to only those elements selected
def filter[T](data: Seq[T], mask: Seq[Boolean]) = (data zip mask).filter(_._2).map(_._1)
// Based on input=>output connectivity, create per-input minimal address decode circuits
val requiredAC = (connectAIO ++ connectCIO).distinct
val outputPortFns: Map[Vector[Boolean], Seq[UInt => Bool]] = requiredAC.map { connectO =>
val port_addrs = edgesOut.map(_.manager.managers.flatMap(_.address))
val routingMask = AddressDecoder(filter(port_addrs, connectO))
val route_addrs = port_addrs.map(seq => AddressSet.unify(seq.map(_.widen(~routingMask)).distinct))
// Print the address mapping
if (false) {
println("Xbar mapping:")
route_addrs.foreach { p =>
print(" ")
p.foreach { a => print(s" ${a}") }
println("")
}
println("--")
}
(connectO, route_addrs.map(seq => (addr: UInt) => seq.map(_.contains(addr)).reduce(_ || _)))
}.toMap
// Print the ID mapping
if (false) {
println(s"XBar mapping:")
(edgesIn zip inputIdRanges).zipWithIndex.foreach { case ((edge, id), i) =>
println(s"\t$i assigned ${id} for ${edge.client.clients.map(_.name).mkString(", ")}")
}
println("")
}
val addressA = (in zip edgesIn) map { case (i, e) => e.address(i.a.bits) }
val addressC = (in zip edgesIn) map { case (i, e) => e.address(i.c.bits) }
def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B
val requestAIO = (connectAIO zip addressA) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } }
val requestCIO = (connectCIO zip addressC) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } }
val requestBOI = out.map { o => inputIdRanges.map { i => i.contains(o.b.bits.source) } }
val requestDOI = out.map { o => inputIdRanges.map { i => i.contains(o.d.bits.source) } }
val requestEIO = in.map { i => outputIdRanges.map { o => o.contains(i.e.bits.sink) } }
val beatsAI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.a.bits) }
val beatsBO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.b.bits) }
val beatsCI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.c.bits) }
val beatsDO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.d.bits) }
val beatsEI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.e.bits) }
// Fanout the input sources to the output sinks
val portsAOI = transpose((in zip requestAIO) map { case (i, r) => TLXbar.fanout(i.a, r, edgesOut.map(_.params(ForceFanoutKey).a)) })
val portsBIO = transpose((out zip requestBOI) map { case (o, r) => TLXbar.fanout(o.b, r, edgesIn .map(_.params(ForceFanoutKey).b)) })
val portsCOI = transpose((in zip requestCIO) map { case (i, r) => TLXbar.fanout(i.c, r, edgesOut.map(_.params(ForceFanoutKey).c)) })
val portsDIO = transpose((out zip requestDOI) map { case (o, r) => TLXbar.fanout(o.d, r, edgesIn .map(_.params(ForceFanoutKey).d)) })
val portsEOI = transpose((in zip requestEIO) map { case (i, r) => TLXbar.fanout(i.e, r, edgesOut.map(_.params(ForceFanoutKey).e)) })
// Arbitrate amongst the sources
for (o <- 0 until out.size) {
TLArbiter(policy)(out(o).a, filter(beatsAI zip portsAOI(o), connectAOI(o)):_*)
TLArbiter(policy)(out(o).c, filter(beatsCI zip portsCOI(o), connectCOI(o)):_*)
TLArbiter(policy)(out(o).e, filter(beatsEI zip portsEOI(o), connectEOI(o)):_*)
filter(portsAOI(o), connectAOI(o).map(!_)) foreach { r => r.ready := false.B }
filter(portsCOI(o), connectCOI(o).map(!_)) foreach { r => r.ready := false.B }
filter(portsEOI(o), connectEOI(o).map(!_)) foreach { r => r.ready := false.B }
}
for (i <- 0 until in.size) {
TLArbiter(policy)(in(i).b, filter(beatsBO zip portsBIO(i), connectBIO(i)):_*)
TLArbiter(policy)(in(i).d, filter(beatsDO zip portsDIO(i), connectDIO(i)):_*)
filter(portsBIO(i), connectBIO(i).map(!_)) foreach { r => r.ready := false.B }
filter(portsDIO(i), connectDIO(i).map(!_)) foreach { r => r.ready := false.B }
}
}
def apply(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode =
{
val xbar = LazyModule(new TLXbar(policy, nameSuffix))
xbar.node
}
// Replicate an input port to each output port
def fanout[T <: TLChannel](input: DecoupledIO[T], select: Seq[Bool], force: Seq[Boolean] = Nil): Seq[DecoupledIO[T]] = {
val filtered = Wire(Vec(select.size, chiselTypeOf(input)))
for (i <- 0 until select.size) {
filtered(i).bits := (if (force.lift(i).getOrElse(false)) IdentityModule(input.bits) else input.bits)
filtered(i).valid := input.valid && (select(i) || (select.size == 1).B)
}
input.ready := Mux1H(select, filtered.map(_.ready))
filtered
}
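  // Note that input.ready is a Mux1H over the selected outputs, so for a valid beat at most
  // one select bit should be high (the select.size == 1 special case bypasses the selects);
  // callers are expected to supply one-hot routing decisions.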
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMXbar(nManagers: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("Xbar"))
val xbar = LazyModule(new TLXbar)
xbar.node := TLDelayer(0.1) := model.node := fuzz.node
(0 until nManagers) foreach { n =>
val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff)))
ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMXbarTest(nManagers: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMXbar(nManagers,txns)).module)
dut.io.start := io.start
io.finished := dut.io.finished
}
class TLMulticlientXbar(nManagers: Int, nClients: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val xbar = LazyModule(new TLXbar)
val fuzzers = (0 until nClients) map { n =>
val fuzz = LazyModule(new TLFuzzer(txns))
xbar.node := TLDelayer(0.1) := fuzz.node
fuzz
}
(0 until nManagers) foreach { n =>
val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff)))
ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzzers.last.module.io.finished
}
}
class TLMulticlientXbarTest(nManagers: Int, nClients: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLMulticlientXbar(nManagers, nClients, txns)).module)
dut.io.start := io.start
io.finished := dut.io.finished
}
| module RocketTile( // @[RocketTile.scala:141:7]
input clock, // @[RocketTile.scala:141:7]
input reset, // @[RocketTile.scala:141:7]
output auto_buffer_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_buffer_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_buffer_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_buffer_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_buffer_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_buffer_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_buffer_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_buffer_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_buffer_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_buffer_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_buffer_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_buffer_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_buffer_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_buffer_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_buffer_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_buffer_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_buffer_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_buffer_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_buffer_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_buffer_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_a_bits_user_amba_prot_bufferable, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_a_bits_user_amba_prot_modifiable, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_a_bits_user_amba_prot_readalloc, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_a_bits_user_amba_prot_writealloc, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_a_bits_user_amba_prot_privileged, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_a_bits_user_amba_prot_secure, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_a_bits_user_amba_prot_fetch, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_buffer_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_buffer_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_buffer_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_buffer_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_buffer_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_buffer_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_wfi_out_0, // @[LazyModuleImp.scala:107:25]
input auto_int_local_in_2_0, // @[LazyModuleImp.scala:107:25]
input auto_int_local_in_1_0, // @[LazyModuleImp.scala:107:25]
input auto_int_local_in_1_1, // @[LazyModuleImp.scala:107:25]
input auto_int_local_in_0_0, // @[LazyModuleImp.scala:107:25]
output auto_trace_source_out_insns_0_valid, // @[LazyModuleImp.scala:107:25]
output [33:0] auto_trace_source_out_insns_0_iaddr, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_trace_source_out_insns_0_insn, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_trace_source_out_insns_0_priv, // @[LazyModuleImp.scala:107:25]
output auto_trace_source_out_insns_0_exception, // @[LazyModuleImp.scala:107:25]
output auto_trace_source_out_insns_0_interrupt, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_trace_source_out_insns_0_cause, // @[LazyModuleImp.scala:107:25]
output [33:0] auto_trace_source_out_insns_0_tval, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_trace_source_out_time, // @[LazyModuleImp.scala:107:25]
input auto_hartid_in // @[LazyModuleImp.scala:107:25]
);
wire buffer_1_auto_out_d_valid; // @[Buffer.scala:40:9]
wire buffer_1_auto_out_d_ready; // @[Buffer.scala:40:9]
wire [63:0] buffer_1_auto_out_d_bits_data; // @[Buffer.scala:40:9]
wire [7:0] buffer_1_auto_out_d_bits_source; // @[Buffer.scala:40:9]
wire [2:0] buffer_1_auto_out_d_bits_size; // @[Buffer.scala:40:9]
wire [2:0] buffer_1_auto_out_d_bits_opcode; // @[Buffer.scala:40:9]
wire buffer_1_auto_out_a_valid; // @[Buffer.scala:40:9]
wire buffer_1_auto_out_a_ready; // @[Buffer.scala:40:9]
wire buffer_1_auto_out_a_bits_corrupt; // @[Buffer.scala:40:9]
wire [63:0] buffer_1_auto_out_a_bits_data; // @[Buffer.scala:40:9]
wire [7:0] buffer_1_auto_out_a_bits_mask; // @[Buffer.scala:40:9]
wire [31:0] buffer_1_auto_out_a_bits_address; // @[Buffer.scala:40:9]
wire [7:0] buffer_1_auto_out_a_bits_source; // @[Buffer.scala:40:9]
wire [2:0] buffer_1_auto_out_a_bits_size; // @[Buffer.scala:40:9]
wire [2:0] buffer_1_auto_out_a_bits_param; // @[Buffer.scala:40:9]
wire [2:0] buffer_1_auto_out_a_bits_opcode; // @[Buffer.scala:40:9]
wire buffer_auto_in_d_valid; // @[Buffer.scala:40:9]
wire buffer_auto_in_d_ready; // @[Buffer.scala:40:9]
wire buffer_auto_in_d_bits_corrupt; // @[Buffer.scala:40:9]
wire [63:0] buffer_auto_in_d_bits_data; // @[Buffer.scala:40:9]
wire buffer_auto_in_d_bits_denied; // @[Buffer.scala:40:9]
wire buffer_auto_in_d_bits_sink; // @[Buffer.scala:40:9]
wire buffer_auto_in_d_bits_source; // @[Buffer.scala:40:9]
wire [3:0] buffer_auto_in_d_bits_size; // @[Buffer.scala:40:9]
wire [1:0] buffer_auto_in_d_bits_param; // @[Buffer.scala:40:9]
wire [2:0] buffer_auto_in_d_bits_opcode; // @[Buffer.scala:40:9]
wire buffer_auto_in_a_valid; // @[Buffer.scala:40:9]
wire buffer_auto_in_a_ready; // @[Buffer.scala:40:9]
wire [63:0] buffer_auto_in_a_bits_data; // @[Buffer.scala:40:9]
wire [7:0] buffer_auto_in_a_bits_mask; // @[Buffer.scala:40:9]
wire [31:0] buffer_auto_in_a_bits_address; // @[Buffer.scala:40:9]
wire buffer_auto_in_a_bits_source; // @[Buffer.scala:40:9]
wire [3:0] buffer_auto_in_a_bits_size; // @[Buffer.scala:40:9]
wire [2:0] buffer_auto_in_a_bits_param; // @[Buffer.scala:40:9]
wire [2:0] buffer_auto_in_a_bits_opcode; // @[Buffer.scala:40:9]
wire buffer_auto_in_a_bits_user_amba_prot_fetch; // @[Buffer.scala:40:9]
wire buffer_auto_in_a_bits_user_amba_prot_secure; // @[Buffer.scala:40:9]
wire buffer_auto_in_a_bits_user_amba_prot_privileged; // @[Buffer.scala:40:9]
wire buffer_auto_in_a_bits_user_amba_prot_writealloc; // @[Buffer.scala:40:9]
wire buffer_auto_in_a_bits_user_amba_prot_readalloc; // @[Buffer.scala:40:9]
wire buffer_auto_in_a_bits_user_amba_prot_modifiable; // @[Buffer.scala:40:9]
wire buffer_auto_in_a_bits_user_amba_prot_bufferable; // @[Buffer.scala:40:9]
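  // Wires for TileLink width-adapter instance widget_1 (WidthWidget).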
wire widget_1_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire [63:0] widget_1_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9]
wire [3:0] widget_1_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9]
wire [1:0] widget_1_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] widget_1_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire [31:0] widget_1_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_in_a_bits_user_amba_prot_writealloc; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_in_a_bits_user_amba_prot_readalloc; // @[WidthWidget.scala:27:9]
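  // Wires for TileLink width-adapter instance widget (WidthWidget).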
wire widget_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire [63:0] widget_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9]
wire [1:0] widget_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_ready; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire [63:0] widget_auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9]
wire [7:0] widget_auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [31:0] widget_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_in_a_bits_size; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_a_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_bits_user_amba_prot_privileged; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_bits_user_amba_prot_writealloc; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_bits_user_amba_prot_readalloc; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_bits_user_amba_prot_modifiable; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_bits_user_amba_prot_bufferable; // @[WidthWidget.scala:27:9]
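  // Bundle-bridge broadcast nexus wires.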
wire [2:0] broadcast_2_auto_in_0_action; // @[BundleBridgeNexus.scala:20:9]
wire broadcast_2_auto_in_0_valid_0; // @[BundleBridgeNexus.scala:20:9]
wire broadcast_auto_in; // @[BundleBridgeNexus.scala:20:9]
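  // Wires for the tile's slave-side TileLink crossbar (tlSlaveXbar).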
wire [7:0] tlSlaveXbar_in_0_d_bits_source; // @[Xbar.scala:159:18]
wire [7:0] tlSlaveXbar_in_0_a_bits_source; // @[Xbar.scala:159:18]
wire tlSlaveXbar_auto_anon_out_d_valid; // @[Xbar.scala:74:9]
wire [63:0] tlSlaveXbar_auto_anon_out_d_bits_data; // @[Xbar.scala:74:9]
wire [7:0] tlSlaveXbar_auto_anon_out_d_bits_source; // @[Xbar.scala:74:9]
wire [2:0] tlSlaveXbar_auto_anon_out_d_bits_size; // @[Xbar.scala:74:9]
wire [2:0] tlSlaveXbar_auto_anon_out_d_bits_opcode; // @[Xbar.scala:74:9]
wire tlSlaveXbar_auto_anon_out_a_ready; // @[Xbar.scala:74:9]
wire tlSlaveXbar_auto_anon_in_d_valid; // @[Xbar.scala:74:9]
wire tlSlaveXbar_auto_anon_in_d_ready; // @[Xbar.scala:74:9]
wire [63:0] tlSlaveXbar_auto_anon_in_d_bits_data; // @[Xbar.scala:74:9]
wire [7:0] tlSlaveXbar_auto_anon_in_d_bits_source; // @[Xbar.scala:74:9]
wire [2:0] tlSlaveXbar_auto_anon_in_d_bits_size; // @[Xbar.scala:74:9]
wire [2:0] tlSlaveXbar_auto_anon_in_d_bits_opcode; // @[Xbar.scala:74:9]
wire tlSlaveXbar_auto_anon_in_a_valid; // @[Xbar.scala:74:9]
wire tlSlaveXbar_auto_anon_in_a_ready; // @[Xbar.scala:74:9]
wire tlSlaveXbar_auto_anon_in_a_bits_corrupt; // @[Xbar.scala:74:9]
wire [63:0] tlSlaveXbar_auto_anon_in_a_bits_data; // @[Xbar.scala:74:9]
wire [7:0] tlSlaveXbar_auto_anon_in_a_bits_mask; // @[Xbar.scala:74:9]
wire [31:0] tlSlaveXbar_auto_anon_in_a_bits_address; // @[Xbar.scala:74:9]
wire [7:0] tlSlaveXbar_auto_anon_in_a_bits_source; // @[Xbar.scala:74:9]
wire [2:0] tlSlaveXbar_auto_anon_in_a_bits_size; // @[Xbar.scala:74:9]
wire [2:0] tlSlaveXbar_auto_anon_in_a_bits_param; // @[Xbar.scala:74:9]
wire [2:0] tlSlaveXbar_auto_anon_in_a_bits_opcode; // @[Xbar.scala:74:9]
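  // Wires driven by the Rocket core instance (core).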
wire _core_io_imem_might_request; // @[RocketTile.scala:147:20]
wire _core_io_imem_req_valid; // @[RocketTile.scala:147:20]
wire [33:0] _core_io_imem_req_bits_pc; // @[RocketTile.scala:147:20]
wire _core_io_imem_req_bits_speculative; // @[RocketTile.scala:147:20]
wire _core_io_imem_sfence_valid; // @[RocketTile.scala:147:20]
wire _core_io_imem_sfence_bits_rs1; // @[RocketTile.scala:147:20]
wire _core_io_imem_sfence_bits_rs2; // @[RocketTile.scala:147:20]
wire [32:0] _core_io_imem_sfence_bits_addr; // @[RocketTile.scala:147:20]
wire _core_io_imem_sfence_bits_asid; // @[RocketTile.scala:147:20]
wire _core_io_imem_sfence_bits_hv; // @[RocketTile.scala:147:20]
wire _core_io_imem_sfence_bits_hg; // @[RocketTile.scala:147:20]
wire _core_io_imem_resp_ready; // @[RocketTile.scala:147:20]
wire _core_io_imem_btb_update_valid; // @[RocketTile.scala:147:20]
wire [1:0] _core_io_imem_btb_update_bits_prediction_cfiType; // @[RocketTile.scala:147:20]
wire _core_io_imem_btb_update_bits_prediction_taken; // @[RocketTile.scala:147:20]
wire [1:0] _core_io_imem_btb_update_bits_prediction_mask; // @[RocketTile.scala:147:20]
wire _core_io_imem_btb_update_bits_prediction_bridx; // @[RocketTile.scala:147:20]
wire [32:0] _core_io_imem_btb_update_bits_prediction_target; // @[RocketTile.scala:147:20]
wire [4:0] _core_io_imem_btb_update_bits_prediction_entry; // @[RocketTile.scala:147:20]
wire [7:0] _core_io_imem_btb_update_bits_prediction_bht_history; // @[RocketTile.scala:147:20]
wire _core_io_imem_btb_update_bits_prediction_bht_value; // @[RocketTile.scala:147:20]
wire [32:0] _core_io_imem_btb_update_bits_pc; // @[RocketTile.scala:147:20]
wire [32:0] _core_io_imem_btb_update_bits_target; // @[RocketTile.scala:147:20]
wire _core_io_imem_btb_update_bits_isValid; // @[RocketTile.scala:147:20]
wire [32:0] _core_io_imem_btb_update_bits_br_pc; // @[RocketTile.scala:147:20]
wire [1:0] _core_io_imem_btb_update_bits_cfiType; // @[RocketTile.scala:147:20]
wire _core_io_imem_bht_update_valid; // @[RocketTile.scala:147:20]
wire [7:0] _core_io_imem_bht_update_bits_prediction_history; // @[RocketTile.scala:147:20]
wire _core_io_imem_bht_update_bits_prediction_value; // @[RocketTile.scala:147:20]
wire [32:0] _core_io_imem_bht_update_bits_pc; // @[RocketTile.scala:147:20]
wire _core_io_imem_bht_update_bits_branch; // @[RocketTile.scala:147:20]
wire _core_io_imem_bht_update_bits_taken; // @[RocketTile.scala:147:20]
wire _core_io_imem_bht_update_bits_mispredict; // @[RocketTile.scala:147:20]
wire _core_io_imem_flush_icache; // @[RocketTile.scala:147:20]
wire _core_io_imem_progress; // @[RocketTile.scala:147:20]
wire _core_io_dmem_req_valid; // @[RocketTile.scala:147:20]
wire [33:0] _core_io_dmem_req_bits_addr; // @[RocketTile.scala:147:20]
wire [6:0] _core_io_dmem_req_bits_tag; // @[RocketTile.scala:147:20]
wire [4:0] _core_io_dmem_req_bits_cmd; // @[RocketTile.scala:147:20]
wire [1:0] _core_io_dmem_req_bits_size; // @[RocketTile.scala:147:20]
wire _core_io_dmem_req_bits_signed; // @[RocketTile.scala:147:20]
wire _core_io_dmem_req_bits_dv; // @[RocketTile.scala:147:20]
wire _core_io_dmem_req_bits_no_resp; // @[RocketTile.scala:147:20]
wire _core_io_dmem_s1_kill; // @[RocketTile.scala:147:20]
wire [63:0] _core_io_dmem_s1_data_data; // @[RocketTile.scala:147:20]
wire _core_io_dmem_keep_clock_enabled; // @[RocketTile.scala:147:20]
wire _core_io_ptw_sfence_valid; // @[RocketTile.scala:147:20]
wire _core_io_ptw_sfence_bits_rs1; // @[RocketTile.scala:147:20]
wire _core_io_ptw_sfence_bits_rs2; // @[RocketTile.scala:147:20]
wire [32:0] _core_io_ptw_sfence_bits_addr; // @[RocketTile.scala:147:20]
wire _core_io_ptw_sfence_bits_asid; // @[RocketTile.scala:147:20]
wire _core_io_ptw_sfence_bits_hv; // @[RocketTile.scala:147:20]
wire _core_io_ptw_sfence_bits_hg; // @[RocketTile.scala:147:20]
wire _core_io_ptw_status_debug; // @[RocketTile.scala:147:20]
wire _core_io_ptw_status_cease; // @[RocketTile.scala:147:20]
wire _core_io_ptw_status_wfi; // @[RocketTile.scala:147:20]
wire [31:0] _core_io_ptw_status_isa; // @[RocketTile.scala:147:20]
wire _core_io_ptw_status_dv; // @[RocketTile.scala:147:20]
wire _core_io_ptw_status_v; // @[RocketTile.scala:147:20]
wire _core_io_ptw_status_sd; // @[RocketTile.scala:147:20]
wire _core_io_ptw_status_mpv; // @[RocketTile.scala:147:20]
wire _core_io_ptw_status_gva; // @[RocketTile.scala:147:20]
wire [1:0] _core_io_ptw_status_fs; // @[RocketTile.scala:147:20]
wire [1:0] _core_io_ptw_status_mpp; // @[RocketTile.scala:147:20]
wire _core_io_ptw_status_mpie; // @[RocketTile.scala:147:20]
wire _core_io_ptw_status_mie; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_debug; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_cease; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_wfi; // @[RocketTile.scala:147:20]
wire [31:0] _core_io_ptw_gstatus_isa; // @[RocketTile.scala:147:20]
wire [1:0] _core_io_ptw_gstatus_dprv; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_dv; // @[RocketTile.scala:147:20]
wire [1:0] _core_io_ptw_gstatus_prv; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_v; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_sd; // @[RocketTile.scala:147:20]
wire [22:0] _core_io_ptw_gstatus_zero2; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_mpv; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_gva; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_mbe; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_sbe; // @[RocketTile.scala:147:20]
wire [1:0] _core_io_ptw_gstatus_sxl; // @[RocketTile.scala:147:20]
wire [7:0] _core_io_ptw_gstatus_zero1; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_tsr; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_tw; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_tvm; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_mxr; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_sum; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_mprv; // @[RocketTile.scala:147:20]
wire [1:0] _core_io_ptw_gstatus_fs; // @[RocketTile.scala:147:20]
wire [1:0] _core_io_ptw_gstatus_mpp; // @[RocketTile.scala:147:20]
wire [1:0] _core_io_ptw_gstatus_vs; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_spp; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_mpie; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_ube; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_spie; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_upie; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_mie; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_hie; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_sie; // @[RocketTile.scala:147:20]
wire _core_io_ptw_gstatus_uie; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_0_cfg_l; // @[RocketTile.scala:147:20]
wire [1:0] _core_io_ptw_pmp_0_cfg_a; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_0_cfg_x; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_0_cfg_w; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_0_cfg_r; // @[RocketTile.scala:147:20]
wire [29:0] _core_io_ptw_pmp_0_addr; // @[RocketTile.scala:147:20]
wire [31:0] _core_io_ptw_pmp_0_mask; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_1_cfg_l; // @[RocketTile.scala:147:20]
wire [1:0] _core_io_ptw_pmp_1_cfg_a; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_1_cfg_x; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_1_cfg_w; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_1_cfg_r; // @[RocketTile.scala:147:20]
wire [29:0] _core_io_ptw_pmp_1_addr; // @[RocketTile.scala:147:20]
wire [31:0] _core_io_ptw_pmp_1_mask; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_2_cfg_l; // @[RocketTile.scala:147:20]
wire [1:0] _core_io_ptw_pmp_2_cfg_a; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_2_cfg_x; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_2_cfg_w; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_2_cfg_r; // @[RocketTile.scala:147:20]
wire [29:0] _core_io_ptw_pmp_2_addr; // @[RocketTile.scala:147:20]
wire [31:0] _core_io_ptw_pmp_2_mask; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_3_cfg_l; // @[RocketTile.scala:147:20]
wire [1:0] _core_io_ptw_pmp_3_cfg_a; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_3_cfg_x; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_3_cfg_w; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_3_cfg_r; // @[RocketTile.scala:147:20]
wire [29:0] _core_io_ptw_pmp_3_addr; // @[RocketTile.scala:147:20]
wire [31:0] _core_io_ptw_pmp_3_mask; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_4_cfg_l; // @[RocketTile.scala:147:20]
wire [1:0] _core_io_ptw_pmp_4_cfg_a; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_4_cfg_x; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_4_cfg_w; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_4_cfg_r; // @[RocketTile.scala:147:20]
wire [29:0] _core_io_ptw_pmp_4_addr; // @[RocketTile.scala:147:20]
wire [31:0] _core_io_ptw_pmp_4_mask; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_5_cfg_l; // @[RocketTile.scala:147:20]
wire [1:0] _core_io_ptw_pmp_5_cfg_a; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_5_cfg_x; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_5_cfg_w; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_5_cfg_r; // @[RocketTile.scala:147:20]
wire [29:0] _core_io_ptw_pmp_5_addr; // @[RocketTile.scala:147:20]
wire [31:0] _core_io_ptw_pmp_5_mask; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_6_cfg_l; // @[RocketTile.scala:147:20]
wire [1:0] _core_io_ptw_pmp_6_cfg_a; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_6_cfg_x; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_6_cfg_w; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_6_cfg_r; // @[RocketTile.scala:147:20]
wire [29:0] _core_io_ptw_pmp_6_addr; // @[RocketTile.scala:147:20]
wire [31:0] _core_io_ptw_pmp_6_mask; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_7_cfg_l; // @[RocketTile.scala:147:20]
wire [1:0] _core_io_ptw_pmp_7_cfg_a; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_7_cfg_x; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_7_cfg_w; // @[RocketTile.scala:147:20]
wire _core_io_ptw_pmp_7_cfg_r; // @[RocketTile.scala:147:20]
wire [29:0] _core_io_ptw_pmp_7_addr; // @[RocketTile.scala:147:20]
wire [31:0] _core_io_ptw_pmp_7_mask; // @[RocketTile.scala:147:20]
wire _core_io_ptw_customCSRs_csrs_0_ren; // @[RocketTile.scala:147:20]
wire _core_io_ptw_customCSRs_csrs_0_wen; // @[RocketTile.scala:147:20]
wire [63:0] _core_io_ptw_customCSRs_csrs_0_wdata; // @[RocketTile.scala:147:20]
wire [63:0] _core_io_ptw_customCSRs_csrs_0_value; // @[RocketTile.scala:147:20]
wire _core_io_ptw_customCSRs_csrs_1_ren; // @[RocketTile.scala:147:20]
wire _core_io_ptw_customCSRs_csrs_1_wen; // @[RocketTile.scala:147:20]
wire [63:0] _core_io_ptw_customCSRs_csrs_1_wdata; // @[RocketTile.scala:147:20]
wire [63:0] _core_io_ptw_customCSRs_csrs_1_value; // @[RocketTile.scala:147:20]
wire _core_io_ptw_customCSRs_csrs_2_ren; // @[RocketTile.scala:147:20]
wire _core_io_ptw_customCSRs_csrs_2_wen; // @[RocketTile.scala:147:20]
wire [63:0] _core_io_ptw_customCSRs_csrs_2_wdata; // @[RocketTile.scala:147:20]
wire [63:0] _core_io_ptw_customCSRs_csrs_2_value; // @[RocketTile.scala:147:20]
wire _core_io_ptw_customCSRs_csrs_3_ren; // @[RocketTile.scala:147:20]
wire _core_io_ptw_customCSRs_csrs_3_wen; // @[RocketTile.scala:147:20]
wire [63:0] _core_io_ptw_customCSRs_csrs_3_wdata; // @[RocketTile.scala:147:20]
wire [63:0] _core_io_ptw_customCSRs_csrs_3_value; // @[RocketTile.scala:147:20]
wire _core_io_fpu_hartid; // @[RocketTile.scala:147:20]
wire [63:0] _core_io_fpu_time; // @[RocketTile.scala:147:20]
wire [31:0] _core_io_fpu_inst; // @[RocketTile.scala:147:20]
wire [63:0] _core_io_fpu_fromint_data; // @[RocketTile.scala:147:20]
wire [2:0] _core_io_fpu_fcsr_rm; // @[RocketTile.scala:147:20]
wire _core_io_fpu_ll_resp_val; // @[RocketTile.scala:147:20]
wire [2:0] _core_io_fpu_ll_resp_type; // @[RocketTile.scala:147:20]
wire [4:0] _core_io_fpu_ll_resp_tag; // @[RocketTile.scala:147:20]
wire [63:0] _core_io_fpu_ll_resp_data; // @[RocketTile.scala:147:20]
wire _core_io_fpu_valid; // @[RocketTile.scala:147:20]
wire _core_io_fpu_killx; // @[RocketTile.scala:147:20]
wire _core_io_fpu_killm; // @[RocketTile.scala:147:20]
wire _core_io_fpu_keep_clock_enabled; // @[RocketTile.scala:147:20]
wire _core_io_wfi; // @[RocketTile.scala:147:20]
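  // Wires driven by the page table walker instance (ptw).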
wire _ptw_io_requestor_0_req_ready; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_resp_valid; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_resp_bits_ae_ptw; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_resp_bits_ae_final; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_resp_bits_pf; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_resp_bits_gf; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_resp_bits_hr; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_resp_bits_hw; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_resp_bits_hx; // @[PTW.scala:802:19]
wire [9:0] _ptw_io_requestor_0_resp_bits_pte_reserved_for_future; // @[PTW.scala:802:19]
wire [43:0] _ptw_io_requestor_0_resp_bits_pte_ppn; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_0_resp_bits_pte_reserved_for_software; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_resp_bits_pte_d; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_resp_bits_pte_a; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_resp_bits_pte_g; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_resp_bits_pte_u; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_resp_bits_pte_x; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_resp_bits_pte_w; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_resp_bits_pte_r; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_resp_bits_pte_v; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_0_resp_bits_level; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_resp_bits_homogeneous; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_resp_bits_gpa_valid; // @[PTW.scala:802:19]
wire [32:0] _ptw_io_requestor_0_resp_bits_gpa_bits; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_resp_bits_gpa_is_pte; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_status_debug; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_status_cease; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_status_wfi; // @[PTW.scala:802:19]
wire [31:0] _ptw_io_requestor_0_status_isa; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_status_dv; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_status_v; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_status_sd; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_status_mpv; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_status_gva; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_0_status_fs; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_0_status_mpp; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_status_mpie; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_status_mie; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_debug; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_cease; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_wfi; // @[PTW.scala:802:19]
wire [31:0] _ptw_io_requestor_0_gstatus_isa; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_0_gstatus_dprv; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_dv; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_0_gstatus_prv; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_v; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_sd; // @[PTW.scala:802:19]
wire [22:0] _ptw_io_requestor_0_gstatus_zero2; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_mpv; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_gva; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_mbe; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_sbe; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_0_gstatus_sxl; // @[PTW.scala:802:19]
wire [7:0] _ptw_io_requestor_0_gstatus_zero1; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_tsr; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_tw; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_tvm; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_mxr; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_sum; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_mprv; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_0_gstatus_fs; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_0_gstatus_mpp; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_0_gstatus_vs; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_spp; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_mpie; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_ube; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_spie; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_upie; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_mie; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_hie; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_sie; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_gstatus_uie; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_0_cfg_l; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_0_pmp_0_cfg_a; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_0_cfg_x; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_0_cfg_w; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_0_cfg_r; // @[PTW.scala:802:19]
wire [29:0] _ptw_io_requestor_0_pmp_0_addr; // @[PTW.scala:802:19]
wire [31:0] _ptw_io_requestor_0_pmp_0_mask; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_1_cfg_l; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_0_pmp_1_cfg_a; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_1_cfg_x; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_1_cfg_w; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_1_cfg_r; // @[PTW.scala:802:19]
wire [29:0] _ptw_io_requestor_0_pmp_1_addr; // @[PTW.scala:802:19]
wire [31:0] _ptw_io_requestor_0_pmp_1_mask; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_2_cfg_l; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_0_pmp_2_cfg_a; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_2_cfg_x; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_2_cfg_w; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_2_cfg_r; // @[PTW.scala:802:19]
wire [29:0] _ptw_io_requestor_0_pmp_2_addr; // @[PTW.scala:802:19]
wire [31:0] _ptw_io_requestor_0_pmp_2_mask; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_3_cfg_l; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_0_pmp_3_cfg_a; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_3_cfg_x; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_3_cfg_w; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_3_cfg_r; // @[PTW.scala:802:19]
wire [29:0] _ptw_io_requestor_0_pmp_3_addr; // @[PTW.scala:802:19]
wire [31:0] _ptw_io_requestor_0_pmp_3_mask; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_4_cfg_l; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_0_pmp_4_cfg_a; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_4_cfg_x; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_4_cfg_w; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_4_cfg_r; // @[PTW.scala:802:19]
wire [29:0] _ptw_io_requestor_0_pmp_4_addr; // @[PTW.scala:802:19]
wire [31:0] _ptw_io_requestor_0_pmp_4_mask; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_5_cfg_l; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_0_pmp_5_cfg_a; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_5_cfg_x; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_5_cfg_w; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_5_cfg_r; // @[PTW.scala:802:19]
wire [29:0] _ptw_io_requestor_0_pmp_5_addr; // @[PTW.scala:802:19]
wire [31:0] _ptw_io_requestor_0_pmp_5_mask; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_6_cfg_l; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_0_pmp_6_cfg_a; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_6_cfg_x; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_6_cfg_w; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_6_cfg_r; // @[PTW.scala:802:19]
wire [29:0] _ptw_io_requestor_0_pmp_6_addr; // @[PTW.scala:802:19]
wire [31:0] _ptw_io_requestor_0_pmp_6_mask; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_7_cfg_l; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_0_pmp_7_cfg_a; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_7_cfg_x; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_7_cfg_w; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_pmp_7_cfg_r; // @[PTW.scala:802:19]
wire [29:0] _ptw_io_requestor_0_pmp_7_addr; // @[PTW.scala:802:19]
wire [31:0] _ptw_io_requestor_0_pmp_7_mask; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_customCSRs_csrs_0_ren; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_customCSRs_csrs_0_wen; // @[PTW.scala:802:19]
wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_0_wdata; // @[PTW.scala:802:19]
wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_0_value; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_customCSRs_csrs_1_ren; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_customCSRs_csrs_1_wen; // @[PTW.scala:802:19]
wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_1_wdata; // @[PTW.scala:802:19]
wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_1_value; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_customCSRs_csrs_2_ren; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_customCSRs_csrs_2_wen; // @[PTW.scala:802:19]
wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_2_wdata; // @[PTW.scala:802:19]
wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_2_value; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_customCSRs_csrs_3_ren; // @[PTW.scala:802:19]
wire _ptw_io_requestor_0_customCSRs_csrs_3_wen; // @[PTW.scala:802:19]
wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_3_wdata; // @[PTW.scala:802:19]
wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_3_value; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_req_ready; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_resp_valid; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_resp_bits_ae_ptw; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_resp_bits_ae_final; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_resp_bits_pf; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_resp_bits_gf; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_resp_bits_hr; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_resp_bits_hw; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_resp_bits_hx; // @[PTW.scala:802:19]
wire [9:0] _ptw_io_requestor_1_resp_bits_pte_reserved_for_future; // @[PTW.scala:802:19]
wire [43:0] _ptw_io_requestor_1_resp_bits_pte_ppn; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_1_resp_bits_pte_reserved_for_software; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_resp_bits_pte_d; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_resp_bits_pte_a; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_resp_bits_pte_g; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_resp_bits_pte_u; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_resp_bits_pte_x; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_resp_bits_pte_w; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_resp_bits_pte_r; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_resp_bits_pte_v; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_1_resp_bits_level; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_resp_bits_homogeneous; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_resp_bits_gpa_valid; // @[PTW.scala:802:19]
wire [32:0] _ptw_io_requestor_1_resp_bits_gpa_bits; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_resp_bits_gpa_is_pte; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_status_debug; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_status_cease; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_status_wfi; // @[PTW.scala:802:19]
wire [31:0] _ptw_io_requestor_1_status_isa; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_status_dv; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_status_v; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_status_sd; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_status_mpv; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_status_gva; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_1_status_fs; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_1_status_mpp; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_status_mpie; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_status_mie; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_debug; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_cease; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_wfi; // @[PTW.scala:802:19]
wire [31:0] _ptw_io_requestor_1_gstatus_isa; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_1_gstatus_dprv; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_dv; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_1_gstatus_prv; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_v; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_sd; // @[PTW.scala:802:19]
wire [22:0] _ptw_io_requestor_1_gstatus_zero2; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_mpv; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_gva; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_mbe; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_sbe; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_1_gstatus_sxl; // @[PTW.scala:802:19]
wire [7:0] _ptw_io_requestor_1_gstatus_zero1; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_tsr; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_tw; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_tvm; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_mxr; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_sum; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_mprv; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_1_gstatus_fs; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_1_gstatus_mpp; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_1_gstatus_vs; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_spp; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_mpie; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_ube; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_spie; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_upie; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_mie; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_hie; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_sie; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_gstatus_uie; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_0_cfg_l; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_1_pmp_0_cfg_a; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_0_cfg_x; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_0_cfg_w; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_0_cfg_r; // @[PTW.scala:802:19]
wire [29:0] _ptw_io_requestor_1_pmp_0_addr; // @[PTW.scala:802:19]
wire [31:0] _ptw_io_requestor_1_pmp_0_mask; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_1_cfg_l; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_1_pmp_1_cfg_a; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_1_cfg_x; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_1_cfg_w; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_1_cfg_r; // @[PTW.scala:802:19]
wire [29:0] _ptw_io_requestor_1_pmp_1_addr; // @[PTW.scala:802:19]
wire [31:0] _ptw_io_requestor_1_pmp_1_mask; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_2_cfg_l; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_1_pmp_2_cfg_a; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_2_cfg_x; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_2_cfg_w; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_2_cfg_r; // @[PTW.scala:802:19]
wire [29:0] _ptw_io_requestor_1_pmp_2_addr; // @[PTW.scala:802:19]
wire [31:0] _ptw_io_requestor_1_pmp_2_mask; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_3_cfg_l; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_1_pmp_3_cfg_a; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_3_cfg_x; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_3_cfg_w; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_3_cfg_r; // @[PTW.scala:802:19]
wire [29:0] _ptw_io_requestor_1_pmp_3_addr; // @[PTW.scala:802:19]
wire [31:0] _ptw_io_requestor_1_pmp_3_mask; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_4_cfg_l; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_1_pmp_4_cfg_a; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_4_cfg_x; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_4_cfg_w; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_4_cfg_r; // @[PTW.scala:802:19]
wire [29:0] _ptw_io_requestor_1_pmp_4_addr; // @[PTW.scala:802:19]
wire [31:0] _ptw_io_requestor_1_pmp_4_mask; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_5_cfg_l; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_1_pmp_5_cfg_a; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_5_cfg_x; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_5_cfg_w; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_5_cfg_r; // @[PTW.scala:802:19]
wire [29:0] _ptw_io_requestor_1_pmp_5_addr; // @[PTW.scala:802:19]
wire [31:0] _ptw_io_requestor_1_pmp_5_mask; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_6_cfg_l; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_1_pmp_6_cfg_a; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_6_cfg_x; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_6_cfg_w; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_6_cfg_r; // @[PTW.scala:802:19]
wire [29:0] _ptw_io_requestor_1_pmp_6_addr; // @[PTW.scala:802:19]
wire [31:0] _ptw_io_requestor_1_pmp_6_mask; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_7_cfg_l; // @[PTW.scala:802:19]
wire [1:0] _ptw_io_requestor_1_pmp_7_cfg_a; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_7_cfg_x; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_7_cfg_w; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_pmp_7_cfg_r; // @[PTW.scala:802:19]
wire [29:0] _ptw_io_requestor_1_pmp_7_addr; // @[PTW.scala:802:19]
wire [31:0] _ptw_io_requestor_1_pmp_7_mask; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_customCSRs_csrs_0_ren; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_customCSRs_csrs_0_wen; // @[PTW.scala:802:19]
wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_0_wdata; // @[PTW.scala:802:19]
wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_0_value; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_customCSRs_csrs_1_ren; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_customCSRs_csrs_1_wen; // @[PTW.scala:802:19]
wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_1_wdata; // @[PTW.scala:802:19]
wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_1_value; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_customCSRs_csrs_2_ren; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_customCSRs_csrs_2_wen; // @[PTW.scala:802:19]
wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_2_wdata; // @[PTW.scala:802:19]
wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_2_value; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_customCSRs_csrs_3_ren; // @[PTW.scala:802:19]
wire _ptw_io_requestor_1_customCSRs_csrs_3_wen; // @[PTW.scala:802:19]
wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_3_wdata; // @[PTW.scala:802:19]
wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_3_value; // @[PTW.scala:802:19]
wire _ptw_io_dpath_perf_pte_miss; // @[PTW.scala:802:19]
wire _ptw_io_dpath_perf_pte_hit; // @[PTW.scala:802:19]
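  // Wires driven by the HellaCache requestor arbiter instance (dcacheArb).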
wire _dcacheArb_io_requestor_0_req_ready; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_s2_nack; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_s2_nack_cause_raw; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_s2_uncached; // @[HellaCache.scala:292:25]
wire [31:0] _dcacheArb_io_requestor_0_s2_paddr; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_resp_valid; // @[HellaCache.scala:292:25]
wire [33:0] _dcacheArb_io_requestor_0_resp_bits_addr; // @[HellaCache.scala:292:25]
wire [6:0] _dcacheArb_io_requestor_0_resp_bits_tag; // @[HellaCache.scala:292:25]
wire [4:0] _dcacheArb_io_requestor_0_resp_bits_cmd; // @[HellaCache.scala:292:25]
wire [1:0] _dcacheArb_io_requestor_0_resp_bits_size; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_resp_bits_signed; // @[HellaCache.scala:292:25]
wire [1:0] _dcacheArb_io_requestor_0_resp_bits_dprv; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_resp_bits_dv; // @[HellaCache.scala:292:25]
wire [63:0] _dcacheArb_io_requestor_0_resp_bits_data; // @[HellaCache.scala:292:25]
wire [7:0] _dcacheArb_io_requestor_0_resp_bits_mask; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_resp_bits_replay; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_resp_bits_has_data; // @[HellaCache.scala:292:25]
wire [63:0] _dcacheArb_io_requestor_0_resp_bits_data_word_bypass; // @[HellaCache.scala:292:25]
wire [63:0] _dcacheArb_io_requestor_0_resp_bits_data_raw; // @[HellaCache.scala:292:25]
wire [63:0] _dcacheArb_io_requestor_0_resp_bits_store_data; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_replay_next; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_s2_xcpt_ma_ld; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_s2_xcpt_ma_st; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_s2_xcpt_pf_ld; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_s2_xcpt_pf_st; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_s2_xcpt_ae_ld; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_s2_xcpt_ae_st; // @[HellaCache.scala:292:25]
wire [33:0] _dcacheArb_io_requestor_0_s2_gpa; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_ordered; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_store_pending; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_perf_acquire; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_perf_grant; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_perf_blocked; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_perf_canAcceptStoreThenLoad; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_perf_canAcceptStoreThenRMW; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_perf_canAcceptLoadThenLoad; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterLoad; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterStore; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_req_ready; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_s2_nack; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_s2_nack_cause_raw; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_s2_uncached; // @[HellaCache.scala:292:25]
wire [31:0] _dcacheArb_io_requestor_1_s2_paddr; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_resp_valid; // @[HellaCache.scala:292:25]
wire [33:0] _dcacheArb_io_requestor_1_resp_bits_addr; // @[HellaCache.scala:292:25]
wire [6:0] _dcacheArb_io_requestor_1_resp_bits_tag; // @[HellaCache.scala:292:25]
wire [4:0] _dcacheArb_io_requestor_1_resp_bits_cmd; // @[HellaCache.scala:292:25]
wire [1:0] _dcacheArb_io_requestor_1_resp_bits_size; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_resp_bits_signed; // @[HellaCache.scala:292:25]
wire [1:0] _dcacheArb_io_requestor_1_resp_bits_dprv; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_resp_bits_dv; // @[HellaCache.scala:292:25]
wire [63:0] _dcacheArb_io_requestor_1_resp_bits_data; // @[HellaCache.scala:292:25]
wire [7:0] _dcacheArb_io_requestor_1_resp_bits_mask; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_resp_bits_replay; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_resp_bits_has_data; // @[HellaCache.scala:292:25]
wire [63:0] _dcacheArb_io_requestor_1_resp_bits_data_word_bypass; // @[HellaCache.scala:292:25]
wire [63:0] _dcacheArb_io_requestor_1_resp_bits_data_raw; // @[HellaCache.scala:292:25]
wire [63:0] _dcacheArb_io_requestor_1_resp_bits_store_data; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_replay_next; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_s2_xcpt_ma_ld; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_s2_xcpt_ma_st; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_s2_xcpt_pf_ld; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_s2_xcpt_pf_st; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_s2_xcpt_ae_ld; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_s2_xcpt_ae_st; // @[HellaCache.scala:292:25]
wire [33:0] _dcacheArb_io_requestor_1_s2_gpa; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_ordered; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_store_pending; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_perf_acquire; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_perf_grant; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_perf_blocked; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_perf_canAcceptStoreThenLoad; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_perf_canAcceptStoreThenRMW; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_perf_canAcceptLoadThenLoad; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterLoad; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterStore; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_mem_req_valid; // @[HellaCache.scala:292:25]
wire [33:0] _dcacheArb_io_mem_req_bits_addr; // @[HellaCache.scala:292:25]
wire [6:0] _dcacheArb_io_mem_req_bits_tag; // @[HellaCache.scala:292:25]
wire [4:0] _dcacheArb_io_mem_req_bits_cmd; // @[HellaCache.scala:292:25]
wire [1:0] _dcacheArb_io_mem_req_bits_size; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_mem_req_bits_signed; // @[HellaCache.scala:292:25]
wire [1:0] _dcacheArb_io_mem_req_bits_dprv; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_mem_req_bits_dv; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_mem_req_bits_phys; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_mem_req_bits_no_resp; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_mem_req_bits_no_xcpt; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_mem_s1_kill; // @[HellaCache.scala:292:25]
wire [63:0] _dcacheArb_io_mem_s1_data_data; // @[HellaCache.scala:292:25]
wire [7:0] _dcacheArb_io_mem_s1_data_mask; // @[HellaCache.scala:292:25]
wire _dcacheArb_io_mem_keep_clock_enabled; // @[HellaCache.scala:292:25]
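  // Wires driven by the FPU instance (fpuOpt).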
wire _fpuOpt_io_fcsr_flags_valid; // @[RocketTile.scala:242:62]
wire [4:0] _fpuOpt_io_fcsr_flags_bits; // @[RocketTile.scala:242:62]
wire [63:0] _fpuOpt_io_store_data; // @[RocketTile.scala:242:62]
wire [63:0] _fpuOpt_io_toint_data; // @[RocketTile.scala:242:62]
wire _fpuOpt_io_fcsr_rdy; // @[RocketTile.scala:242:62]
wire _fpuOpt_io_nack_mem; // @[RocketTile.scala:242:62]
wire _fpuOpt_io_illegal_rm; // @[RocketTile.scala:242:62]
wire _fpuOpt_io_dec_ldst; // @[RocketTile.scala:242:62]
wire _fpuOpt_io_dec_wen; // @[RocketTile.scala:242:62]
wire _fpuOpt_io_dec_ren1; // @[RocketTile.scala:242:62]
wire _fpuOpt_io_dec_ren2; // @[RocketTile.scala:242:62]
wire _fpuOpt_io_dec_ren3; // @[RocketTile.scala:242:62]
wire _fpuOpt_io_dec_swap12; // @[RocketTile.scala:242:62]
wire _fpuOpt_io_dec_swap23; // @[RocketTile.scala:242:62]
wire [1:0] _fpuOpt_io_dec_typeTagIn; // @[RocketTile.scala:242:62]
wire [1:0] _fpuOpt_io_dec_typeTagOut; // @[RocketTile.scala:242:62]
wire _fpuOpt_io_dec_fromint; // @[RocketTile.scala:242:62]
wire _fpuOpt_io_dec_toint; // @[RocketTile.scala:242:62]
wire _fpuOpt_io_dec_fastpipe; // @[RocketTile.scala:242:62]
wire _fpuOpt_io_dec_fma; // @[RocketTile.scala:242:62]
wire _fpuOpt_io_dec_div; // @[RocketTile.scala:242:62]
wire _fpuOpt_io_dec_sqrt; // @[RocketTile.scala:242:62]
wire _fpuOpt_io_dec_wflags; // @[RocketTile.scala:242:62]
wire _fpuOpt_io_dec_vec; // @[RocketTile.scala:242:62]
wire _fpuOpt_io_sboard_set; // @[RocketTile.scala:242:62]
wire _fpuOpt_io_sboard_clr; // @[RocketTile.scala:242:62]
wire [4:0] _fpuOpt_io_sboard_clra; // @[RocketTile.scala:242:62]
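  // Wires driven by the TileLink fragmenter instance (fragmenter_1).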
wire _fragmenter_1_auto_anon_out_a_valid; // @[Fragmenter.scala:345:34]
wire [2:0] _fragmenter_1_auto_anon_out_a_bits_opcode; // @[Fragmenter.scala:345:34]
wire [2:0] _fragmenter_1_auto_anon_out_a_bits_param; // @[Fragmenter.scala:345:34]
wire [1:0] _fragmenter_1_auto_anon_out_a_bits_size; // @[Fragmenter.scala:345:34]
wire [12:0] _fragmenter_1_auto_anon_out_a_bits_source; // @[Fragmenter.scala:345:34]
wire [31:0] _fragmenter_1_auto_anon_out_a_bits_address; // @[Fragmenter.scala:345:34]
wire [7:0] _fragmenter_1_auto_anon_out_a_bits_mask; // @[Fragmenter.scala:345:34]
wire [63:0] _fragmenter_1_auto_anon_out_a_bits_data; // @[Fragmenter.scala:345:34]
wire _fragmenter_1_auto_anon_out_a_bits_corrupt; // @[Fragmenter.scala:345:34]
wire _fragmenter_1_auto_anon_out_d_ready; // @[Fragmenter.scala:345:34]
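  // Wires driven by the DTIM slave-port adapter instance (dtim_adapter).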
wire _dtim_adapter_auto_in_a_ready; // @[RocketTile.scala:75:15]
wire _dtim_adapter_auto_in_d_valid; // @[RocketTile.scala:75:15]
wire [2:0] _dtim_adapter_auto_in_d_bits_opcode; // @[RocketTile.scala:75:15]
wire [1:0] _dtim_adapter_auto_in_d_bits_size; // @[RocketTile.scala:75:15]
wire [12:0] _dtim_adapter_auto_in_d_bits_source; // @[RocketTile.scala:75:15]
wire [63:0] _dtim_adapter_auto_in_d_bits_data; // @[RocketTile.scala:75:15]
wire _dtim_adapter_io_dmem_req_valid; // @[RocketTile.scala:75:15]
wire [33:0] _dtim_adapter_io_dmem_req_bits_addr; // @[RocketTile.scala:75:15]
wire [4:0] _dtim_adapter_io_dmem_req_bits_cmd; // @[RocketTile.scala:75:15]
wire [1:0] _dtim_adapter_io_dmem_req_bits_size; // @[RocketTile.scala:75:15]
wire _dtim_adapter_io_dmem_s1_kill; // @[RocketTile.scala:75:15]
wire [63:0] _dtim_adapter_io_dmem_s1_data_data; // @[RocketTile.scala:75:15]
wire [7:0] _dtim_adapter_io_dmem_s1_data_mask; // @[RocketTile.scala:75:15]
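  // Wires driven by the instruction-fetch frontend instance (frontend).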
wire _frontend_io_cpu_resp_valid; // @[Frontend.scala:393:28]
wire [1:0] _frontend_io_cpu_resp_bits_btb_cfiType; // @[Frontend.scala:393:28]
wire _frontend_io_cpu_resp_bits_btb_taken; // @[Frontend.scala:393:28]
wire [1:0] _frontend_io_cpu_resp_bits_btb_mask; // @[Frontend.scala:393:28]
wire _frontend_io_cpu_resp_bits_btb_bridx; // @[Frontend.scala:393:28]
wire [32:0] _frontend_io_cpu_resp_bits_btb_target; // @[Frontend.scala:393:28]
wire [4:0] _frontend_io_cpu_resp_bits_btb_entry; // @[Frontend.scala:393:28]
wire [7:0] _frontend_io_cpu_resp_bits_btb_bht_history; // @[Frontend.scala:393:28]
wire _frontend_io_cpu_resp_bits_btb_bht_value; // @[Frontend.scala:393:28]
wire [33:0] _frontend_io_cpu_resp_bits_pc; // @[Frontend.scala:393:28]
wire [31:0] _frontend_io_cpu_resp_bits_data; // @[Frontend.scala:393:28]
wire [1:0] _frontend_io_cpu_resp_bits_mask; // @[Frontend.scala:393:28]
wire _frontend_io_cpu_resp_bits_xcpt_pf_inst; // @[Frontend.scala:393:28]
wire _frontend_io_cpu_resp_bits_xcpt_gf_inst; // @[Frontend.scala:393:28]
wire _frontend_io_cpu_resp_bits_xcpt_ae_inst; // @[Frontend.scala:393:28]
wire _frontend_io_cpu_resp_bits_replay; // @[Frontend.scala:393:28]
wire _frontend_io_cpu_gpa_valid; // @[Frontend.scala:393:28]
wire [33:0] _frontend_io_cpu_gpa_bits; // @[Frontend.scala:393:28]
wire _frontend_io_cpu_gpa_is_pte; // @[Frontend.scala:393:28]
wire [33:0] _frontend_io_cpu_npc; // @[Frontend.scala:393:28]
wire _frontend_io_cpu_perf_acquire; // @[Frontend.scala:393:28]
wire _frontend_io_ptw_req_bits_valid; // @[Frontend.scala:393:28]
wire [20:0] _frontend_io_ptw_req_bits_bits_addr; // @[Frontend.scala:393:28]
wire _frontend_io_ptw_req_bits_bits_need_gpa; // @[Frontend.scala:393:28]
wire _frontend_io_ptw_req_bits_bits_vstage1; // @[Frontend.scala:393:28]
wire _frontend_io_ptw_req_bits_bits_stage2; // @[Frontend.scala:393:28]
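  // Wires driven by the data cache instance (dcache).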
wire _dcache_io_cpu_req_ready; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_s2_nack; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_s2_nack_cause_raw; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_s2_uncached; // @[HellaCache.scala:278:43]
wire [31:0] _dcache_io_cpu_s2_paddr; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_resp_valid; // @[HellaCache.scala:278:43]
wire [33:0] _dcache_io_cpu_resp_bits_addr; // @[HellaCache.scala:278:43]
wire [6:0] _dcache_io_cpu_resp_bits_tag; // @[HellaCache.scala:278:43]
wire [4:0] _dcache_io_cpu_resp_bits_cmd; // @[HellaCache.scala:278:43]
wire [1:0] _dcache_io_cpu_resp_bits_size; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_resp_bits_signed; // @[HellaCache.scala:278:43]
wire [1:0] _dcache_io_cpu_resp_bits_dprv; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_resp_bits_dv; // @[HellaCache.scala:278:43]
wire [63:0] _dcache_io_cpu_resp_bits_data; // @[HellaCache.scala:278:43]
wire [7:0] _dcache_io_cpu_resp_bits_mask; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_resp_bits_replay; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_resp_bits_has_data; // @[HellaCache.scala:278:43]
wire [63:0] _dcache_io_cpu_resp_bits_data_word_bypass; // @[HellaCache.scala:278:43]
wire [63:0] _dcache_io_cpu_resp_bits_data_raw; // @[HellaCache.scala:278:43]
wire [63:0] _dcache_io_cpu_resp_bits_store_data; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_replay_next; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_s2_xcpt_ma_ld; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_s2_xcpt_ma_st; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_s2_xcpt_pf_ld; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_s2_xcpt_pf_st; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_s2_xcpt_ae_ld; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_s2_xcpt_ae_st; // @[HellaCache.scala:278:43]
wire [33:0] _dcache_io_cpu_s2_gpa; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_ordered; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_store_pending; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_perf_acquire; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_perf_grant; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_perf_blocked; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_perf_canAcceptStoreThenLoad; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_perf_canAcceptStoreThenRMW; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_perf_canAcceptLoadThenLoad; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_perf_storeBufferEmptyAfterLoad; // @[HellaCache.scala:278:43]
wire _dcache_io_cpu_perf_storeBufferEmptyAfterStore; // @[HellaCache.scala:278:43]
wire [20:0] _dcache_io_ptw_req_bits_bits_addr; // @[HellaCache.scala:278:43]
wire _dcache_io_ptw_req_bits_bits_need_gpa; // @[HellaCache.scala:278:43]
wire _dcache_io_ptw_req_bits_bits_vstage1; // @[HellaCache.scala:278:43]
wire _dcache_io_ptw_req_bits_bits_stage2; // @[HellaCache.scala:278:43]
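  // Aliases of the top-level input ports (compiler-generated _0 suffix).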
wire auto_buffer_in_a_valid_0 = auto_buffer_in_a_valid; // @[RocketTile.scala:141:7]
wire [2:0] auto_buffer_in_a_bits_opcode_0 = auto_buffer_in_a_bits_opcode; // @[RocketTile.scala:141:7]
wire [2:0] auto_buffer_in_a_bits_param_0 = auto_buffer_in_a_bits_param; // @[RocketTile.scala:141:7]
wire [2:0] auto_buffer_in_a_bits_size_0 = auto_buffer_in_a_bits_size; // @[RocketTile.scala:141:7]
wire [7:0] auto_buffer_in_a_bits_source_0 = auto_buffer_in_a_bits_source; // @[RocketTile.scala:141:7]
wire [31:0] auto_buffer_in_a_bits_address_0 = auto_buffer_in_a_bits_address; // @[RocketTile.scala:141:7]
wire [7:0] auto_buffer_in_a_bits_mask_0 = auto_buffer_in_a_bits_mask; // @[RocketTile.scala:141:7]
wire [63:0] auto_buffer_in_a_bits_data_0 = auto_buffer_in_a_bits_data; // @[RocketTile.scala:141:7]
wire auto_buffer_in_a_bits_corrupt_0 = auto_buffer_in_a_bits_corrupt; // @[RocketTile.scala:141:7]
wire auto_buffer_in_d_ready_0 = auto_buffer_in_d_ready; // @[RocketTile.scala:141:7]
wire auto_buffer_out_a_ready_0 = auto_buffer_out_a_ready; // @[RocketTile.scala:141:7]
wire auto_buffer_out_d_valid_0 = auto_buffer_out_d_valid; // @[RocketTile.scala:141:7]
wire [2:0] auto_buffer_out_d_bits_opcode_0 = auto_buffer_out_d_bits_opcode; // @[RocketTile.scala:141:7]
wire [1:0] auto_buffer_out_d_bits_param_0 = auto_buffer_out_d_bits_param; // @[RocketTile.scala:141:7]
wire [3:0] auto_buffer_out_d_bits_size_0 = auto_buffer_out_d_bits_size; // @[RocketTile.scala:141:7]
wire auto_buffer_out_d_bits_source_0 = auto_buffer_out_d_bits_source; // @[RocketTile.scala:141:7]
wire auto_buffer_out_d_bits_sink_0 = auto_buffer_out_d_bits_sink; // @[RocketTile.scala:141:7]
wire auto_buffer_out_d_bits_denied_0 = auto_buffer_out_d_bits_denied; // @[RocketTile.scala:141:7]
wire [63:0] auto_buffer_out_d_bits_data_0 = auto_buffer_out_d_bits_data; // @[RocketTile.scala:141:7]
wire auto_buffer_out_d_bits_corrupt_0 = auto_buffer_out_d_bits_corrupt; // @[RocketTile.scala:141:7]
wire auto_int_local_in_2_0_0 = auto_int_local_in_2_0; // @[RocketTile.scala:141:7]
wire auto_int_local_in_1_0_0 = auto_int_local_in_1_0; // @[RocketTile.scala:141:7]
wire auto_int_local_in_1_1_0 = auto_int_local_in_1_1; // @[RocketTile.scala:141:7]
wire auto_int_local_in_0_0_0 = auto_int_local_in_0_0; // @[RocketTile.scala:141:7]
wire auto_hartid_in_0 = auto_hartid_in; // @[RocketTile.scala:141:7]
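  // Constant-valued wires: folded crossbar temporaries and tied-off signals.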
wire [32:0] tlSlaveXbar__requestAIO_T_2 = 33'h0; // @[Parameters.scala:137:{41,46}]
wire [32:0] tlSlaveXbar__requestAIO_T_3 = 33'h0; // @[Parameters.scala:137:{41,46}]
wire [32:0] tlSlaveXbar__requestCIO_T_1 = 33'h0; // @[Parameters.scala:137:{41,46}]
wire [32:0] tlSlaveXbar__requestCIO_T_2 = 33'h0; // @[Parameters.scala:137:{41,46}]
wire [32:0] tlSlaveXbar__requestCIO_T_3 = 33'h0; // @[Parameters.scala:137:{41,46}]
wire [12:0] tlSlaveXbar__beatsBO_decode_T = 13'h3F; // @[package.scala:243:71]
wire [12:0] tlSlaveXbar__beatsCI_decode_T = 13'h3F; // @[package.scala:243:71]
wire [5:0] tlSlaveXbar__beatsBO_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [5:0] tlSlaveXbar__beatsCI_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [5:0] tlSlaveXbar__beatsBO_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] tlSlaveXbar__beatsCI_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire auto_buffer_in_d_bits_sink = 1'h0; // @[RocketTile.scala:141:7]
wire auto_buffer_in_d_bits_denied = 1'h0; // @[RocketTile.scala:141:7]
wire auto_buffer_in_d_bits_corrupt = 1'h0; // @[RocketTile.scala:141:7]
wire auto_buffer_out_a_bits_corrupt = 1'h0; // @[RocketTile.scala:141:7]
wire auto_cease_out_0 = 1'h0; // @[RocketTile.scala:141:7]
wire auto_halt_out_0 = 1'h0; // @[RocketTile.scala:141:7]
wire auto_trace_core_source_out_group_0_iretire = 1'h0; // @[RocketTile.scala:141:7]
wire auto_trace_core_source_out_group_0_ilastsize = 1'h0; // @[RocketTile.scala:141:7]
wire tlSlaveXbar_auto_anon_in_d_bits_sink = 1'h0; // @[Xbar.scala:74:9]
wire tlSlaveXbar_auto_anon_in_d_bits_denied = 1'h0; // @[Xbar.scala:74:9]
wire tlSlaveXbar_auto_anon_in_d_bits_corrupt = 1'h0; // @[Xbar.scala:74:9]
wire tlSlaveXbar_auto_anon_out_d_bits_sink = 1'h0; // @[Xbar.scala:74:9]
wire tlSlaveXbar_auto_anon_out_d_bits_denied = 1'h0; // @[Xbar.scala:74:9]
wire tlSlaveXbar_auto_anon_out_d_bits_corrupt = 1'h0; // @[Xbar.scala:74:9]
wire tlSlaveXbar_anonOut_d_bits_sink = 1'h0; // @[MixedNode.scala:542:17]
wire tlSlaveXbar_anonOut_d_bits_denied = 1'h0; // @[MixedNode.scala:542:17]
wire tlSlaveXbar_anonOut_d_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire tlSlaveXbar_anonIn_d_bits_sink = 1'h0; // @[MixedNode.scala:551:17]
wire tlSlaveXbar_anonIn_d_bits_denied = 1'h0; // @[MixedNode.scala:551:17]
wire tlSlaveXbar_anonIn_d_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire tlSlaveXbar_in_0_d_bits_sink = 1'h0; // @[Xbar.scala:159:18]
wire tlSlaveXbar_in_0_d_bits_denied = 1'h0; // @[Xbar.scala:159:18]
wire tlSlaveXbar_in_0_d_bits_corrupt = 1'h0; // @[Xbar.scala:159:18]
wire tlSlaveXbar_out_0_d_bits_sink = 1'h0; // @[Xbar.scala:216:19]
wire tlSlaveXbar_out_0_d_bits_denied = 1'h0; // @[Xbar.scala:216:19]
wire tlSlaveXbar_out_0_d_bits_corrupt = 1'h0; // @[Xbar.scala:216:19]
wire tlSlaveXbar__out_0_d_bits_sink_T = 1'h0; // @[Xbar.scala:251:53]
wire tlSlaveXbar__addressC_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire tlSlaveXbar__addressC_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire tlSlaveXbar__addressC_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire tlSlaveXbar__addressC_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire tlSlaveXbar__addressC_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire tlSlaveXbar__addressC_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire tlSlaveXbar__requestBOI_WIRE_ready = 1'h0; // @[Bundles.scala:264:74]
wire tlSlaveXbar__requestBOI_WIRE_valid = 1'h0; // @[Bundles.scala:264:74]
wire tlSlaveXbar__requestBOI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74]
wire tlSlaveXbar__requestBOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61]
wire tlSlaveXbar__requestBOI_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61]
wire tlSlaveXbar__requestBOI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61]
wire tlSlaveXbar__requestBOI_T = 1'h0; // @[Parameters.scala:54:10]
wire tlSlaveXbar__requestDOI_T = 1'h0; // @[Parameters.scala:54:10]
wire tlSlaveXbar__requestEIO_WIRE_ready = 1'h0; // @[Bundles.scala:267:74]
wire tlSlaveXbar__requestEIO_WIRE_valid = 1'h0; // @[Bundles.scala:267:74]
wire tlSlaveXbar__requestEIO_WIRE_bits_sink = 1'h0; // @[Bundles.scala:267:74]
wire tlSlaveXbar__requestEIO_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61]
wire tlSlaveXbar__requestEIO_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61]
wire tlSlaveXbar__requestEIO_WIRE_1_bits_sink = 1'h0; // @[Bundles.scala:267:61]
wire tlSlaveXbar__beatsBO_WIRE_ready = 1'h0; // @[Bundles.scala:264:74]
wire tlSlaveXbar__beatsBO_WIRE_valid = 1'h0; // @[Bundles.scala:264:74]
wire tlSlaveXbar__beatsBO_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74]
wire tlSlaveXbar__beatsBO_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61]
wire tlSlaveXbar__beatsBO_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61]
wire tlSlaveXbar__beatsBO_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61]
wire tlSlaveXbar__beatsBO_opdata_T = 1'h0; // @[Edges.scala:97:37]
wire tlSlaveXbar__beatsCI_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire tlSlaveXbar__beatsCI_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire tlSlaveXbar__beatsCI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire tlSlaveXbar__beatsCI_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire tlSlaveXbar__beatsCI_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire tlSlaveXbar__beatsCI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire tlSlaveXbar_beatsCI_opdata = 1'h0; // @[Edges.scala:102:36]
wire tlSlaveXbar__beatsEI_WIRE_ready = 1'h0; // @[Bundles.scala:267:74]
wire tlSlaveXbar__beatsEI_WIRE_valid = 1'h0; // @[Bundles.scala:267:74]
wire tlSlaveXbar__beatsEI_WIRE_bits_sink = 1'h0; // @[Bundles.scala:267:74]
wire tlSlaveXbar__beatsEI_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61]
wire tlSlaveXbar__beatsEI_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61]
wire tlSlaveXbar__beatsEI_WIRE_1_bits_sink = 1'h0; // @[Bundles.scala:267:61]
wire tlSlaveXbar__portsBIO_WIRE_ready = 1'h0; // @[Bundles.scala:264:74]
wire tlSlaveXbar__portsBIO_WIRE_valid = 1'h0; // @[Bundles.scala:264:74]
wire tlSlaveXbar__portsBIO_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74]
wire tlSlaveXbar__portsBIO_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61]
wire tlSlaveXbar__portsBIO_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61]
wire tlSlaveXbar__portsBIO_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61]
wire tlSlaveXbar_portsBIO_filtered_0_ready = 1'h0; // @[Xbar.scala:352:24]
wire tlSlaveXbar_portsBIO_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24]
wire tlSlaveXbar_portsBIO_filtered_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire tlSlaveXbar__portsBIO_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40]
wire tlSlaveXbar__portsCOI_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire tlSlaveXbar__portsCOI_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire tlSlaveXbar__portsCOI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire tlSlaveXbar__portsCOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire tlSlaveXbar__portsCOI_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire tlSlaveXbar__portsCOI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire tlSlaveXbar_portsCOI_filtered_0_ready = 1'h0; // @[Xbar.scala:352:24]
wire tlSlaveXbar_portsCOI_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24]
wire tlSlaveXbar_portsCOI_filtered_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire tlSlaveXbar__portsCOI_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40]
wire tlSlaveXbar_portsDIO_filtered_0_bits_sink = 1'h0; // @[Xbar.scala:352:24]
wire tlSlaveXbar_portsDIO_filtered_0_bits_denied = 1'h0; // @[Xbar.scala:352:24]
wire tlSlaveXbar_portsDIO_filtered_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire tlSlaveXbar__portsEOI_WIRE_ready = 1'h0; // @[Bundles.scala:267:74]
wire tlSlaveXbar__portsEOI_WIRE_valid = 1'h0; // @[Bundles.scala:267:74]
wire tlSlaveXbar__portsEOI_WIRE_bits_sink = 1'h0; // @[Bundles.scala:267:74]
wire tlSlaveXbar__portsEOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61]
wire tlSlaveXbar__portsEOI_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61]
wire tlSlaveXbar__portsEOI_WIRE_1_bits_sink = 1'h0; // @[Bundles.scala:267:61]
wire tlSlaveXbar_portsEOI_filtered_0_ready = 1'h0; // @[Xbar.scala:352:24]
wire tlSlaveXbar_portsEOI_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24]
wire tlSlaveXbar_portsEOI_filtered_0_bits_sink = 1'h0; // @[Xbar.scala:352:24]
wire tlSlaveXbar__portsEOI_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40]
wire broadcast_childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire broadcast_childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire broadcast__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire broadcast_1_childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire broadcast_1_childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire broadcast_1__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire nexus_childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire nexus_childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire nexus__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire nexus_nodeOut = 1'h0; // @[MixedNode.scala:542:17]
wire nexus_1_childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire nexus_1_childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire nexus_1__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire nexus_1_x1_bundleOut_x_sourceOpt_enable = 1'h0; // @[BaseTile.scala:305:19]
wire nexus_1_x1_bundleOut_x_sourceOpt_stall = 1'h0; // @[BaseTile.scala:305:19]
wire nexus_1_nodeOut_enable = 1'h0; // @[MixedNode.scala:542:17]
wire nexus_1_nodeOut_stall = 1'h0; // @[MixedNode.scala:542:17]
wire nexus_1_defaultWireOpt_enable = 1'h0; // @[BaseTile.scala:305:19]
wire nexus_1_defaultWireOpt_stall = 1'h0; // @[BaseTile.scala:305:19]
wire broadcast_2_auto_in_0_rvalid_0 = 1'h0; // @[BundleBridgeNexus.scala:20:9]
wire broadcast_2_auto_in_0_wvalid_0 = 1'h0; // @[BundleBridgeNexus.scala:20:9]
wire broadcast_2_auto_in_0_ivalid_0 = 1'h0; // @[BundleBridgeNexus.scala:20:9]
wire broadcast_2_childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire broadcast_2_childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire broadcast_2__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire broadcast_2_nodeIn_0_rvalid_0 = 1'h0; // @[MixedNode.scala:551:17]
wire broadcast_2_nodeIn_0_wvalid_0 = 1'h0; // @[MixedNode.scala:551:17]
wire broadcast_2_nodeIn_0_ivalid_0 = 1'h0; // @[MixedNode.scala:551:17]
wire widget_auto_anon_in_a_bits_source = 1'h0; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_bits_user_amba_prot_fetch = 1'h0; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_bits_source = 1'h0; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_bits_source = 1'h0; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_bits_user_amba_prot_fetch = 1'h0; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_d_bits_source = 1'h0; // @[WidthWidget.scala:27:9]
wire widget_anonOut_a_bits_source = 1'h0; // @[MixedNode.scala:542:17]
wire widget_anonOut_a_bits_user_amba_prot_fetch = 1'h0; // @[MixedNode.scala:542:17]
wire widget_anonOut_a_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire widget_anonOut_d_bits_source = 1'h0; // @[MixedNode.scala:542:17]
wire widget_anonIn_a_bits_source = 1'h0; // @[MixedNode.scala:551:17]
wire widget_anonIn_a_bits_user_amba_prot_fetch = 1'h0; // @[MixedNode.scala:551:17]
wire widget_anonIn_a_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire widget_anonIn_d_bits_source = 1'h0; // @[MixedNode.scala:551:17]
wire widget_1_auto_anon_in_a_bits_source = 1'h0; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_in_a_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_in_d_bits_source = 1'h0; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_out_a_bits_source = 1'h0; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_out_a_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_out_d_bits_source = 1'h0; // @[WidthWidget.scala:27:9]
wire widget_1_anonOut_a_bits_source = 1'h0; // @[MixedNode.scala:542:17]
wire widget_1_anonOut_a_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire widget_1_anonOut_d_bits_source = 1'h0; // @[MixedNode.scala:542:17]
wire widget_1_anonIn_a_bits_source = 1'h0; // @[MixedNode.scala:551:17]
wire widget_1_anonIn_a_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire widget_1_anonIn_d_bits_source = 1'h0; // @[MixedNode.scala:551:17]
wire buffer_auto_in_a_bits_corrupt = 1'h0; // @[Buffer.scala:40:9]
wire buffer_auto_out_a_bits_corrupt = 1'h0; // @[Buffer.scala:40:9]
wire buffer_nodeOut_a_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire buffer_nodeIn_a_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire buffer_1_auto_in_d_bits_sink = 1'h0; // @[Buffer.scala:40:9]
wire buffer_1_auto_in_d_bits_denied = 1'h0; // @[Buffer.scala:40:9]
wire buffer_1_auto_in_d_bits_corrupt = 1'h0; // @[Buffer.scala:40:9]
wire buffer_1_auto_out_d_bits_sink = 1'h0; // @[Buffer.scala:40:9]
wire buffer_1_auto_out_d_bits_denied = 1'h0; // @[Buffer.scala:40:9]
wire buffer_1_auto_out_d_bits_corrupt = 1'h0; // @[Buffer.scala:40:9]
wire buffer_1_nodeOut_d_bits_sink = 1'h0; // @[MixedNode.scala:542:17]
wire buffer_1_nodeOut_d_bits_denied = 1'h0; // @[MixedNode.scala:542:17]
wire buffer_1_nodeOut_d_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire buffer_1_nodeIn_d_bits_sink = 1'h0; // @[MixedNode.scala:551:17]
wire buffer_1_nodeIn_d_bits_denied = 1'h0; // @[MixedNode.scala:551:17]
wire buffer_1_nodeIn_d_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire tlOtherMastersNodeOut_a_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire tlOtherMastersNodeIn_a_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire traceCoreSourceNodeOut_group_0_iretire = 1'h0; // @[MixedNode.scala:542:17]
wire traceCoreSourceNodeOut_group_0_ilastsize = 1'h0; // @[MixedNode.scala:542:17]
wire bundleIn_x_sourceOpt_enable = 1'h0; // @[BaseTile.scala:305:19]
wire bundleIn_x_sourceOpt_stall = 1'h0; // @[BaseTile.scala:305:19]
wire traceAuxSinkNodeIn_enable = 1'h0; // @[MixedNode.scala:551:17]
wire traceAuxSinkNodeIn_stall = 1'h0; // @[MixedNode.scala:551:17]
wire bpwatchSourceNodeOut_0_rvalid_0 = 1'h0; // @[MixedNode.scala:542:17]
wire bpwatchSourceNodeOut_0_wvalid_0 = 1'h0; // @[MixedNode.scala:542:17]
wire bpwatchSourceNodeOut_0_ivalid_0 = 1'h0; // @[MixedNode.scala:542:17]
wire haltNodeOut_0 = 1'h0; // @[MixedNode.scala:542:17]
wire ceaseNodeOut_0 = 1'h0; // @[MixedNode.scala:542:17]
wire slaveNodeOut_d_bits_sink = 1'h0; // @[MixedNode.scala:542:17]
wire slaveNodeOut_d_bits_denied = 1'h0; // @[MixedNode.scala:542:17]
wire slaveNodeOut_d_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire slaveNodeIn_d_bits_sink = 1'h0; // @[MixedNode.scala:551:17]
wire slaveNodeIn_d_bits_denied = 1'h0; // @[MixedNode.scala:551:17]
wire slaveNodeIn_d_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
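// widget_1 only ever sees a fixed-format A request -- opcode 3'h4 (Get), size 4'h6, full
// 8-byte mask, zero data -- so those request fields reduce to constants.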
wire [2:0] widget_1_auto_anon_in_a_bits_opcode = 3'h4; // @[WidthWidget.scala:27:9]
wire [2:0] widget_1_auto_anon_out_a_bits_opcode = 3'h4; // @[WidthWidget.scala:27:9]
wire [2:0] widget_1_anonOut_a_bits_opcode = 3'h4; // @[WidthWidget.scala:27:9]
wire [2:0] widget_1_anonIn_a_bits_opcode = 3'h4; // @[WidthWidget.scala:27:9]
wire [3:0] widget_1_auto_anon_in_a_bits_size = 4'h6; // @[WidthWidget.scala:27:9]
wire [3:0] widget_1_auto_anon_out_a_bits_size = 4'h6; // @[WidthWidget.scala:27:9]
wire [3:0] widget_1_anonOut_a_bits_size = 4'h6; // @[WidthWidget.scala:27:9]
wire [3:0] widget_1_anonIn_a_bits_size = 4'h6; // @[WidthWidget.scala:27:9]
wire [7:0] widget_1_auto_anon_in_a_bits_mask = 8'hFF; // @[WidthWidget.scala:27:9]
wire [7:0] widget_1_auto_anon_out_a_bits_mask = 8'hFF; // @[WidthWidget.scala:27:9]
wire [7:0] widget_1_anonOut_a_bits_mask = 8'hFF; // @[WidthWidget.scala:27:9]
wire [7:0] widget_1_anonIn_a_bits_mask = 8'hFF; // @[WidthWidget.scala:27:9]
wire [2:0] tlSlaveXbar__addressC_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] tlSlaveXbar__addressC_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] tlSlaveXbar__addressC_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] tlSlaveXbar__addressC_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] tlSlaveXbar__addressC_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] tlSlaveXbar__addressC_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] tlSlaveXbar__requestBOI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74]
wire [2:0] tlSlaveXbar__requestBOI_WIRE_bits_size = 3'h0; // @[Bundles.scala:264:74]
wire [2:0] tlSlaveXbar__requestBOI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61]
wire [2:0] tlSlaveXbar__requestBOI_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:264:61]
wire [2:0] tlSlaveXbar__beatsBO_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74]
wire [2:0] tlSlaveXbar__beatsBO_WIRE_bits_size = 3'h0; // @[Bundles.scala:264:74]
wire [2:0] tlSlaveXbar__beatsBO_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61]
wire [2:0] tlSlaveXbar__beatsBO_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:264:61]
wire [2:0] tlSlaveXbar_beatsBO_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] tlSlaveXbar_beatsBO_0 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] tlSlaveXbar__beatsCI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] tlSlaveXbar__beatsCI_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] tlSlaveXbar__beatsCI_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] tlSlaveXbar__beatsCI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] tlSlaveXbar__beatsCI_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] tlSlaveXbar__beatsCI_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] tlSlaveXbar_beatsCI_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] tlSlaveXbar_beatsCI_0 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] tlSlaveXbar__portsBIO_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74]
wire [2:0] tlSlaveXbar__portsBIO_WIRE_bits_size = 3'h0; // @[Bundles.scala:264:74]
wire [2:0] tlSlaveXbar__portsBIO_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61]
wire [2:0] tlSlaveXbar__portsBIO_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:264:61]
wire [2:0] tlSlaveXbar_portsBIO_filtered_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24]
wire [2:0] tlSlaveXbar_portsBIO_filtered_0_bits_size = 3'h0; // @[Xbar.scala:352:24]
wire [2:0] tlSlaveXbar__portsCOI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] tlSlaveXbar__portsCOI_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] tlSlaveXbar__portsCOI_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] tlSlaveXbar__portsCOI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] tlSlaveXbar__portsCOI_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] tlSlaveXbar__portsCOI_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] tlSlaveXbar_portsCOI_filtered_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24]
wire [2:0] tlSlaveXbar_portsCOI_filtered_0_bits_param = 3'h0; // @[Xbar.scala:352:24]
wire [2:0] tlSlaveXbar_portsCOI_filtered_0_bits_size = 3'h0; // @[Xbar.scala:352:24]
wire [2:0] widget_1_auto_anon_in_a_bits_param = 3'h0; // @[WidthWidget.scala:27:9]
wire [2:0] widget_1_auto_anon_out_a_bits_param = 3'h0; // @[WidthWidget.scala:27:9]
wire [2:0] widget_1_anonOut_a_bits_param = 3'h0; // @[MixedNode.scala:542:17]
wire [2:0] widget_1_anonIn_a_bits_param = 3'h0; // @[MixedNode.scala:551:17]
wire [7:0] tlSlaveXbar__addressC_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] tlSlaveXbar__addressC_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] tlSlaveXbar__requestBOI_WIRE_bits_source = 8'h0; // @[Bundles.scala:264:74]
wire [7:0] tlSlaveXbar__requestBOI_WIRE_bits_mask = 8'h0; // @[Bundles.scala:264:74]
wire [7:0] tlSlaveXbar__requestBOI_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:264:61]
wire [7:0] tlSlaveXbar__requestBOI_WIRE_1_bits_mask = 8'h0; // @[Bundles.scala:264:61]
wire [7:0] tlSlaveXbar__requestBOI_uncommonBits_T = 8'h0; // @[Parameters.scala:52:29]
wire [7:0] tlSlaveXbar_requestBOI_uncommonBits = 8'h0; // @[Parameters.scala:52:56]
wire [7:0] tlSlaveXbar__beatsBO_WIRE_bits_source = 8'h0; // @[Bundles.scala:264:74]
wire [7:0] tlSlaveXbar__beatsBO_WIRE_bits_mask = 8'h0; // @[Bundles.scala:264:74]
wire [7:0] tlSlaveXbar__beatsBO_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:264:61]
wire [7:0] tlSlaveXbar__beatsBO_WIRE_1_bits_mask = 8'h0; // @[Bundles.scala:264:61]
wire [7:0] tlSlaveXbar__beatsCI_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] tlSlaveXbar__beatsCI_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] tlSlaveXbar__portsBIO_WIRE_bits_source = 8'h0; // @[Bundles.scala:264:74]
wire [7:0] tlSlaveXbar__portsBIO_WIRE_bits_mask = 8'h0; // @[Bundles.scala:264:74]
wire [7:0] tlSlaveXbar__portsBIO_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:264:61]
wire [7:0] tlSlaveXbar__portsBIO_WIRE_1_bits_mask = 8'h0; // @[Bundles.scala:264:61]
wire [7:0] tlSlaveXbar_portsBIO_filtered_0_bits_source = 8'h0; // @[Xbar.scala:352:24]
wire [7:0] tlSlaveXbar_portsBIO_filtered_0_bits_mask = 8'h0; // @[Xbar.scala:352:24]
wire [7:0] tlSlaveXbar__portsCOI_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] tlSlaveXbar__portsCOI_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] tlSlaveXbar_portsCOI_filtered_0_bits_source = 8'h0; // @[Xbar.scala:352:24]
wire [63:0] tlSlaveXbar__addressC_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] tlSlaveXbar__addressC_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] tlSlaveXbar__requestBOI_WIRE_bits_data = 64'h0; // @[Bundles.scala:264:74]
wire [63:0] tlSlaveXbar__requestBOI_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:264:61]
wire [63:0] tlSlaveXbar__beatsBO_WIRE_bits_data = 64'h0; // @[Bundles.scala:264:74]
wire [63:0] tlSlaveXbar__beatsBO_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:264:61]
wire [63:0] tlSlaveXbar__beatsCI_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] tlSlaveXbar__beatsCI_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] tlSlaveXbar__portsBIO_WIRE_bits_data = 64'h0; // @[Bundles.scala:264:74]
wire [63:0] tlSlaveXbar__portsBIO_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:264:61]
wire [63:0] tlSlaveXbar_portsBIO_filtered_0_bits_data = 64'h0; // @[Xbar.scala:352:24]
wire [63:0] tlSlaveXbar__portsCOI_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] tlSlaveXbar__portsCOI_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] tlSlaveXbar_portsCOI_filtered_0_bits_data = 64'h0; // @[Xbar.scala:352:24]
wire [63:0] widget_1_auto_anon_in_a_bits_data = 64'h0; // @[WidthWidget.scala:27:9]
wire [63:0] widget_1_auto_anon_out_a_bits_data = 64'h0; // @[WidthWidget.scala:27:9]
wire [63:0] widget_1_anonOut_a_bits_data = 64'h0; // @[MixedNode.scala:542:17]
wire [63:0] widget_1_anonIn_a_bits_data = 64'h0; // @[MixedNode.scala:551:17]
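// Reset vector: the constant 32'h10000 is fanned out from the tile input through broadcast_1
// to the core's resetVectorSinkNode.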
wire [31:0] auto_reset_vector_in = 32'h10000; // @[RocketTile.scala:141:7]
wire [31:0] broadcast_1_auto_in = 32'h10000; // @[RocketTile.scala:141:7]
wire [31:0] broadcast_1_auto_out_1 = 32'h10000; // @[RocketTile.scala:141:7]
wire [31:0] broadcast_1_auto_out_0 = 32'h10000; // @[RocketTile.scala:141:7]
wire [31:0] broadcast_1_nodeIn = 32'h10000; // @[RocketTile.scala:141:7]
wire [31:0] broadcast_1_nodeOut = 32'h10000; // @[RocketTile.scala:141:7]
wire [31:0] broadcast_1_x1_nodeOut = 32'h10000; // @[RocketTile.scala:141:7]
wire [31:0] resetVectorSinkNodeIn = 32'h10000; // @[RocketTile.scala:141:7]
wire [31:0] reset_vectorOut = 32'h10000; // @[RocketTile.scala:141:7]
wire [31:0] reset_vectorIn = 32'h10000; // @[RocketTile.scala:141:7]
wire [3:0] auto_trace_core_source_out_group_0_itype = 4'h0; // @[RocketTile.scala:141:7]
wire [3:0] auto_trace_core_source_out_priv = 4'h0; // @[RocketTile.scala:141:7]
wire [3:0] traceCoreSourceNodeOut_group_0_itype = 4'h0; // @[MixedNode.scala:542:17]
wire [3:0] traceCoreSourceNodeOut_priv = 4'h0; // @[MixedNode.scala:542:17]
wire [31:0] auto_trace_core_source_out_group_0_iaddr = 32'h0; // @[RocketTile.scala:141:7]
wire [31:0] auto_trace_core_source_out_tval = 32'h0; // @[RocketTile.scala:141:7]
wire [31:0] auto_trace_core_source_out_cause = 32'h0; // @[RocketTile.scala:141:7]
wire [31:0] tlSlaveXbar__addressC_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] tlSlaveXbar__addressC_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] tlSlaveXbar__requestCIO_T = 32'h0; // @[Parameters.scala:137:31]
wire [31:0] tlSlaveXbar__requestBOI_WIRE_bits_address = 32'h0; // @[Bundles.scala:264:74]
wire [31:0] tlSlaveXbar__requestBOI_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:264:61]
wire [31:0] tlSlaveXbar__beatsBO_WIRE_bits_address = 32'h0; // @[Bundles.scala:264:74]
wire [31:0] tlSlaveXbar__beatsBO_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:264:61]
wire [31:0] tlSlaveXbar__beatsCI_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] tlSlaveXbar__beatsCI_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] tlSlaveXbar__portsBIO_WIRE_bits_address = 32'h0; // @[Bundles.scala:264:74]
wire [31:0] tlSlaveXbar__portsBIO_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:264:61]
wire [31:0] tlSlaveXbar_portsBIO_filtered_0_bits_address = 32'h0; // @[Xbar.scala:352:24]
wire [31:0] tlSlaveXbar__portsCOI_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] tlSlaveXbar__portsCOI_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] tlSlaveXbar_portsCOI_filtered_0_bits_address = 32'h0; // @[Xbar.scala:352:24]
wire [31:0] traceCoreSourceNodeOut_group_0_iaddr = 32'h0; // @[MixedNode.scala:542:17]
wire [31:0] traceCoreSourceNodeOut_tval = 32'h0; // @[MixedNode.scala:542:17]
wire [31:0] traceCoreSourceNodeOut_cause = 32'h0; // @[MixedNode.scala:542:17]
wire [1:0] auto_buffer_in_d_bits_param = 2'h0; // @[RocketTile.scala:141:7]
wire [1:0] tlSlaveXbar_auto_anon_in_d_bits_param = 2'h0; // @[Xbar.scala:74:9]
wire [1:0] tlSlaveXbar_auto_anon_out_d_bits_param = 2'h0; // @[Xbar.scala:74:9]
wire [1:0] tlSlaveXbar_anonOut_d_bits_param = 2'h0; // @[MixedNode.scala:542:17]
wire [1:0] tlSlaveXbar_anonIn_d_bits_param = 2'h0; // @[MixedNode.scala:551:17]
wire [1:0] tlSlaveXbar_in_0_d_bits_param = 2'h0; // @[Xbar.scala:159:18]
wire [1:0] tlSlaveXbar_out_0_d_bits_param = 2'h0; // @[Xbar.scala:216:19]
wire [1:0] tlSlaveXbar__requestBOI_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] tlSlaveXbar__requestBOI_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] tlSlaveXbar__beatsBO_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] tlSlaveXbar__beatsBO_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] tlSlaveXbar__portsBIO_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] tlSlaveXbar__portsBIO_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] tlSlaveXbar_portsBIO_filtered_0_bits_param = 2'h0; // @[Xbar.scala:352:24]
wire [1:0] tlSlaveXbar_portsDIO_filtered_0_bits_param = 2'h0; // @[Xbar.scala:352:24]
wire [1:0] buffer_1_auto_in_d_bits_param = 2'h0; // @[Buffer.scala:40:9]
wire [1:0] buffer_1_auto_out_d_bits_param = 2'h0; // @[Buffer.scala:40:9]
wire [1:0] buffer_1_nodeOut_d_bits_param = 2'h0; // @[MixedNode.scala:542:17]
wire [1:0] buffer_1_nodeIn_d_bits_param = 2'h0; // @[MixedNode.scala:551:17]
wire [1:0] slaveNodeOut_d_bits_param = 2'h0; // @[MixedNode.scala:542:17]
wire [1:0] slaveNodeIn_d_bits_param = 2'h0; // @[MixedNode.scala:551:17]
wire tlSlaveXbar__requestAIO_T_4 = 1'h1; // @[Parameters.scala:137:59]
wire tlSlaveXbar_requestAIO_0_0 = 1'h1; // @[Xbar.scala:307:107]
wire tlSlaveXbar__requestCIO_T_4 = 1'h1; // @[Parameters.scala:137:59]
wire tlSlaveXbar_requestCIO_0_0 = 1'h1; // @[Xbar.scala:308:107]
wire tlSlaveXbar__requestBOI_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire tlSlaveXbar__requestBOI_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire tlSlaveXbar__requestBOI_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire tlSlaveXbar__requestBOI_T_4 = 1'h1; // @[Parameters.scala:57:20]
wire tlSlaveXbar_requestBOI_0_0 = 1'h1; // @[Parameters.scala:56:48]
wire tlSlaveXbar__requestDOI_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire tlSlaveXbar__requestDOI_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire tlSlaveXbar__requestDOI_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire tlSlaveXbar__requestDOI_T_4 = 1'h1; // @[Parameters.scala:57:20]
wire tlSlaveXbar_requestDOI_0_0 = 1'h1; // @[Parameters.scala:56:48]
wire tlSlaveXbar_beatsBO_opdata = 1'h1; // @[Edges.scala:97:28]
wire tlSlaveXbar__portsAOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire tlSlaveXbar__portsBIO_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire tlSlaveXbar__portsCOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire tlSlaveXbar__portsDIO_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire tlSlaveXbar__portsEOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire widget_auto_anon_in_a_bits_user_amba_prot_secure = 1'h1; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_bits_user_amba_prot_secure = 1'h1; // @[WidthWidget.scala:27:9]
wire widget_anonOut_a_bits_user_amba_prot_secure = 1'h1; // @[MixedNode.scala:542:17]
wire widget_anonIn_a_bits_user_amba_prot_secure = 1'h1; // @[MixedNode.scala:551:17]
wire widget_1_auto_anon_in_a_bits_user_amba_prot_bufferable = 1'h1; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_in_a_bits_user_amba_prot_modifiable = 1'h1; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_in_a_bits_user_amba_prot_privileged = 1'h1; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_in_a_bits_user_amba_prot_secure = 1'h1; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_in_a_bits_user_amba_prot_fetch = 1'h1; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_in_d_ready = 1'h1; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_out_a_bits_user_amba_prot_bufferable = 1'h1; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_out_a_bits_user_amba_prot_modifiable = 1'h1; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_out_a_bits_user_amba_prot_privileged = 1'h1; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_out_a_bits_user_amba_prot_secure = 1'h1; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_out_a_bits_user_amba_prot_fetch = 1'h1; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_out_d_ready = 1'h1; // @[WidthWidget.scala:27:9]
wire widget_1_anonOut_a_bits_user_amba_prot_bufferable = 1'h1; // @[MixedNode.scala:542:17]
wire widget_1_anonOut_a_bits_user_amba_prot_modifiable = 1'h1; // @[MixedNode.scala:542:17]
wire widget_1_anonOut_a_bits_user_amba_prot_privileged = 1'h1; // @[MixedNode.scala:542:17]
wire widget_1_anonOut_a_bits_user_amba_prot_secure = 1'h1; // @[MixedNode.scala:542:17]
wire widget_1_anonOut_a_bits_user_amba_prot_fetch = 1'h1; // @[MixedNode.scala:542:17]
wire widget_1_anonOut_d_ready = 1'h1; // @[MixedNode.scala:542:17]
wire widget_1_anonIn_a_bits_user_amba_prot_bufferable = 1'h1; // @[MixedNode.scala:551:17]
wire widget_1_anonIn_a_bits_user_amba_prot_modifiable = 1'h1; // @[MixedNode.scala:551:17]
wire widget_1_anonIn_a_bits_user_amba_prot_privileged = 1'h1; // @[MixedNode.scala:551:17]
wire widget_1_anonIn_a_bits_user_amba_prot_secure = 1'h1; // @[MixedNode.scala:551:17]
wire widget_1_anonIn_a_bits_user_amba_prot_fetch = 1'h1; // @[MixedNode.scala:551:17]
wire widget_1_anonIn_d_ready = 1'h1; // @[MixedNode.scala:551:17]
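// External TileLink ports: buffer_1 fronts the inward (auto_buffer_in) slave interface and
// buffer fronts the outward (auto_buffer_out) master interface; the aliases below connect
// the module-level I/O onto those buffers.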
wire buffer_1_auto_in_a_ready; // @[Buffer.scala:40:9]
wire buffer_1_auto_in_a_valid = auto_buffer_in_a_valid_0; // @[Buffer.scala:40:9]
wire [2:0] buffer_1_auto_in_a_bits_opcode = auto_buffer_in_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] buffer_1_auto_in_a_bits_param = auto_buffer_in_a_bits_param_0; // @[Buffer.scala:40:9]
wire [2:0] buffer_1_auto_in_a_bits_size = auto_buffer_in_a_bits_size_0; // @[Buffer.scala:40:9]
wire [7:0] buffer_1_auto_in_a_bits_source = auto_buffer_in_a_bits_source_0; // @[Buffer.scala:40:9]
wire [31:0] buffer_1_auto_in_a_bits_address = auto_buffer_in_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] buffer_1_auto_in_a_bits_mask = auto_buffer_in_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] buffer_1_auto_in_a_bits_data = auto_buffer_in_a_bits_data_0; // @[Buffer.scala:40:9]
wire buffer_1_auto_in_a_bits_corrupt = auto_buffer_in_a_bits_corrupt_0; // @[Buffer.scala:40:9]
wire buffer_1_auto_in_d_ready = auto_buffer_in_d_ready_0; // @[Buffer.scala:40:9]
wire buffer_1_auto_in_d_valid; // @[Buffer.scala:40:9]
wire [2:0] buffer_1_auto_in_d_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] buffer_1_auto_in_d_bits_size; // @[Buffer.scala:40:9]
wire [7:0] buffer_1_auto_in_d_bits_source; // @[Buffer.scala:40:9]
wire [63:0] buffer_1_auto_in_d_bits_data; // @[Buffer.scala:40:9]
wire buffer_auto_out_a_ready = auto_buffer_out_a_ready_0; // @[Buffer.scala:40:9]
wire buffer_auto_out_a_valid; // @[Buffer.scala:40:9]
wire [2:0] buffer_auto_out_a_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] buffer_auto_out_a_bits_param; // @[Buffer.scala:40:9]
wire [3:0] buffer_auto_out_a_bits_size; // @[Buffer.scala:40:9]
wire buffer_auto_out_a_bits_source; // @[Buffer.scala:40:9]
wire [31:0] buffer_auto_out_a_bits_address; // @[Buffer.scala:40:9]
wire buffer_auto_out_a_bits_user_amba_prot_bufferable; // @[Buffer.scala:40:9]
wire buffer_auto_out_a_bits_user_amba_prot_modifiable; // @[Buffer.scala:40:9]
wire buffer_auto_out_a_bits_user_amba_prot_readalloc; // @[Buffer.scala:40:9]
wire buffer_auto_out_a_bits_user_amba_prot_writealloc; // @[Buffer.scala:40:9]
wire buffer_auto_out_a_bits_user_amba_prot_privileged; // @[Buffer.scala:40:9]
wire buffer_auto_out_a_bits_user_amba_prot_secure; // @[Buffer.scala:40:9]
wire buffer_auto_out_a_bits_user_amba_prot_fetch; // @[Buffer.scala:40:9]
wire [7:0] buffer_auto_out_a_bits_mask; // @[Buffer.scala:40:9]
wire [63:0] buffer_auto_out_a_bits_data; // @[Buffer.scala:40:9]
wire buffer_auto_out_d_ready; // @[Buffer.scala:40:9]
wire buffer_auto_out_d_valid = auto_buffer_out_d_valid_0; // @[Buffer.scala:40:9]
wire [2:0] buffer_auto_out_d_bits_opcode = auto_buffer_out_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [1:0] buffer_auto_out_d_bits_param = auto_buffer_out_d_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] buffer_auto_out_d_bits_size = auto_buffer_out_d_bits_size_0; // @[Buffer.scala:40:9]
wire buffer_auto_out_d_bits_source = auto_buffer_out_d_bits_source_0; // @[Buffer.scala:40:9]
wire buffer_auto_out_d_bits_sink = auto_buffer_out_d_bits_sink_0; // @[Buffer.scala:40:9]
wire buffer_auto_out_d_bits_denied = auto_buffer_out_d_bits_denied_0; // @[Buffer.scala:40:9]
wire [63:0] buffer_auto_out_d_bits_data = auto_buffer_out_d_bits_data_0; // @[Buffer.scala:40:9]
wire buffer_auto_out_d_bits_corrupt = auto_buffer_out_d_bits_corrupt_0; // @[Buffer.scala:40:9]
wire wfiNodeOut_0; // @[MixedNode.scala:542:17]
wire x1_int_localIn_1_0 = auto_int_local_in_2_0_0; // @[RocketTile.scala:141:7]
wire x1_int_localIn_0 = auto_int_local_in_1_0_0; // @[RocketTile.scala:141:7]
wire x1_int_localIn_1 = auto_int_local_in_1_1_0; // @[RocketTile.scala:141:7]
wire int_localIn_0 = auto_int_local_in_0_0_0; // @[RocketTile.scala:141:7]
wire traceSourceNodeOut_insns_0_valid; // @[MixedNode.scala:542:17]
wire [33:0] traceSourceNodeOut_insns_0_iaddr; // @[MixedNode.scala:542:17]
wire [31:0] traceSourceNodeOut_insns_0_insn; // @[MixedNode.scala:542:17]
wire [2:0] traceSourceNodeOut_insns_0_priv; // @[MixedNode.scala:542:17]
wire traceSourceNodeOut_insns_0_exception; // @[MixedNode.scala:542:17]
wire traceSourceNodeOut_insns_0_interrupt; // @[MixedNode.scala:542:17]
wire [63:0] traceSourceNodeOut_insns_0_cause; // @[MixedNode.scala:542:17]
wire [33:0] traceSourceNodeOut_insns_0_tval; // @[MixedNode.scala:542:17]
wire [63:0] traceSourceNodeOut_time; // @[MixedNode.scala:542:17]
wire hartidIn = auto_hartid_in_0; // @[RocketTile.scala:141:7]
wire auto_buffer_in_a_ready_0; // @[RocketTile.scala:141:7]
wire [2:0] auto_buffer_in_d_bits_opcode_0; // @[RocketTile.scala:141:7]
wire [2:0] auto_buffer_in_d_bits_size_0; // @[RocketTile.scala:141:7]
wire [7:0] auto_buffer_in_d_bits_source_0; // @[RocketTile.scala:141:7]
wire [63:0] auto_buffer_in_d_bits_data_0; // @[RocketTile.scala:141:7]
wire auto_buffer_in_d_valid_0; // @[RocketTile.scala:141:7]
wire auto_buffer_out_a_bits_user_amba_prot_bufferable_0; // @[RocketTile.scala:141:7]
wire auto_buffer_out_a_bits_user_amba_prot_modifiable_0; // @[RocketTile.scala:141:7]
wire auto_buffer_out_a_bits_user_amba_prot_readalloc_0; // @[RocketTile.scala:141:7]
wire auto_buffer_out_a_bits_user_amba_prot_writealloc_0; // @[RocketTile.scala:141:7]
wire auto_buffer_out_a_bits_user_amba_prot_privileged_0; // @[RocketTile.scala:141:7]
wire auto_buffer_out_a_bits_user_amba_prot_secure_0; // @[RocketTile.scala:141:7]
wire auto_buffer_out_a_bits_user_amba_prot_fetch_0; // @[RocketTile.scala:141:7]
wire [2:0] auto_buffer_out_a_bits_opcode_0; // @[RocketTile.scala:141:7]
wire [2:0] auto_buffer_out_a_bits_param_0; // @[RocketTile.scala:141:7]
wire [3:0] auto_buffer_out_a_bits_size_0; // @[RocketTile.scala:141:7]
wire auto_buffer_out_a_bits_source_0; // @[RocketTile.scala:141:7]
wire [31:0] auto_buffer_out_a_bits_address_0; // @[RocketTile.scala:141:7]
wire [7:0] auto_buffer_out_a_bits_mask_0; // @[RocketTile.scala:141:7]
wire [63:0] auto_buffer_out_a_bits_data_0; // @[RocketTile.scala:141:7]
wire auto_buffer_out_a_valid_0; // @[RocketTile.scala:141:7]
wire auto_buffer_out_d_ready_0; // @[RocketTile.scala:141:7]
wire auto_wfi_out_0_0; // @[RocketTile.scala:141:7]
wire auto_trace_source_out_insns_0_valid_0; // @[RocketTile.scala:141:7]
wire [33:0] auto_trace_source_out_insns_0_iaddr_0; // @[RocketTile.scala:141:7]
wire [31:0] auto_trace_source_out_insns_0_insn_0; // @[RocketTile.scala:141:7]
wire [2:0] auto_trace_source_out_insns_0_priv_0; // @[RocketTile.scala:141:7]
wire auto_trace_source_out_insns_0_exception_0; // @[RocketTile.scala:141:7]
wire auto_trace_source_out_insns_0_interrupt_0; // @[RocketTile.scala:141:7]
wire [63:0] auto_trace_source_out_insns_0_cause_0; // @[RocketTile.scala:141:7]
wire [33:0] auto_trace_source_out_insns_0_tval_0; // @[RocketTile.scala:141:7]
wire [63:0] auto_trace_source_out_time_0; // @[RocketTile.scala:141:7]
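// tlSlaveXbar wiring: with a single inward and a single outward port, the crossbar's A and D
// channels reduce to direct pass-through connections with per-channel beat bookkeeping.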
wire tlSlaveXbar_anonIn_a_ready; // @[MixedNode.scala:551:17]
wire slaveNodeOut_a_ready = tlSlaveXbar_auto_anon_in_a_ready; // @[Xbar.scala:74:9]
wire slaveNodeOut_a_valid; // @[MixedNode.scala:542:17]
wire tlSlaveXbar_anonIn_a_valid = tlSlaveXbar_auto_anon_in_a_valid; // @[Xbar.scala:74:9]
wire [2:0] slaveNodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] tlSlaveXbar_anonIn_a_bits_opcode = tlSlaveXbar_auto_anon_in_a_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] slaveNodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] tlSlaveXbar_anonIn_a_bits_param = tlSlaveXbar_auto_anon_in_a_bits_param; // @[Xbar.scala:74:9]
wire [2:0] slaveNodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [2:0] tlSlaveXbar_anonIn_a_bits_size = tlSlaveXbar_auto_anon_in_a_bits_size; // @[Xbar.scala:74:9]
wire [7:0] slaveNodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [7:0] tlSlaveXbar_anonIn_a_bits_source = tlSlaveXbar_auto_anon_in_a_bits_source; // @[Xbar.scala:74:9]
wire [31:0] slaveNodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [31:0] tlSlaveXbar_anonIn_a_bits_address = tlSlaveXbar_auto_anon_in_a_bits_address; // @[Xbar.scala:74:9]
wire [7:0] slaveNodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [7:0] tlSlaveXbar_anonIn_a_bits_mask = tlSlaveXbar_auto_anon_in_a_bits_mask; // @[Xbar.scala:74:9]
wire [63:0] slaveNodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire [63:0] tlSlaveXbar_anonIn_a_bits_data = tlSlaveXbar_auto_anon_in_a_bits_data; // @[Xbar.scala:74:9]
wire slaveNodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire tlSlaveXbar_anonIn_a_bits_corrupt = tlSlaveXbar_auto_anon_in_a_bits_corrupt; // @[Xbar.scala:74:9]
wire slaveNodeOut_d_ready; // @[MixedNode.scala:542:17]
wire tlSlaveXbar_anonIn_d_ready = tlSlaveXbar_auto_anon_in_d_ready; // @[Xbar.scala:74:9]
wire tlSlaveXbar_anonIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] tlSlaveXbar_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire slaveNodeOut_d_valid = tlSlaveXbar_auto_anon_in_d_valid; // @[Xbar.scala:74:9]
wire [2:0] slaveNodeOut_d_bits_opcode = tlSlaveXbar_auto_anon_in_d_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] tlSlaveXbar_anonIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [7:0] tlSlaveXbar_anonIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [2:0] slaveNodeOut_d_bits_size = tlSlaveXbar_auto_anon_in_d_bits_size; // @[Xbar.scala:74:9]
wire [7:0] slaveNodeOut_d_bits_source = tlSlaveXbar_auto_anon_in_d_bits_source; // @[Xbar.scala:74:9]
wire [63:0] tlSlaveXbar_anonIn_d_bits_data; // @[MixedNode.scala:551:17]
wire [63:0] slaveNodeOut_d_bits_data = tlSlaveXbar_auto_anon_in_d_bits_data; // @[Xbar.scala:74:9]
wire tlSlaveXbar_anonOut_a_ready = tlSlaveXbar_auto_anon_out_a_ready; // @[Xbar.scala:74:9]
wire tlSlaveXbar_anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] tlSlaveXbar_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] tlSlaveXbar_anonOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] tlSlaveXbar_anonOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [7:0] tlSlaveXbar_anonOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] tlSlaveXbar_anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] tlSlaveXbar_anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] tlSlaveXbar_anonOut_a_bits_data; // @[MixedNode.scala:542:17]
wire tlSlaveXbar_anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire tlSlaveXbar_anonOut_d_ready; // @[MixedNode.scala:542:17]
wire tlSlaveXbar_anonOut_d_valid = tlSlaveXbar_auto_anon_out_d_valid; // @[Xbar.scala:74:9]
wire [2:0] tlSlaveXbar_anonOut_d_bits_opcode = tlSlaveXbar_auto_anon_out_d_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] tlSlaveXbar_anonOut_d_bits_size = tlSlaveXbar_auto_anon_out_d_bits_size; // @[Xbar.scala:74:9]
wire [7:0] tlSlaveXbar_anonOut_d_bits_source = tlSlaveXbar_auto_anon_out_d_bits_source; // @[Xbar.scala:74:9]
wire [63:0] tlSlaveXbar_anonOut_d_bits_data = tlSlaveXbar_auto_anon_out_d_bits_data; // @[Xbar.scala:74:9]
wire [2:0] tlSlaveXbar_auto_anon_out_a_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] tlSlaveXbar_auto_anon_out_a_bits_param; // @[Xbar.scala:74:9]
wire [2:0] tlSlaveXbar_auto_anon_out_a_bits_size; // @[Xbar.scala:74:9]
wire [7:0] tlSlaveXbar_auto_anon_out_a_bits_source; // @[Xbar.scala:74:9]
wire [31:0] tlSlaveXbar_auto_anon_out_a_bits_address; // @[Xbar.scala:74:9]
wire [7:0] tlSlaveXbar_auto_anon_out_a_bits_mask; // @[Xbar.scala:74:9]
wire [63:0] tlSlaveXbar_auto_anon_out_a_bits_data; // @[Xbar.scala:74:9]
wire tlSlaveXbar_auto_anon_out_a_bits_corrupt; // @[Xbar.scala:74:9]
wire tlSlaveXbar_auto_anon_out_a_valid; // @[Xbar.scala:74:9]
wire tlSlaveXbar_auto_anon_out_d_ready; // @[Xbar.scala:74:9]
wire tlSlaveXbar_out_0_a_ready = tlSlaveXbar_anonOut_a_ready; // @[Xbar.scala:216:19]
wire tlSlaveXbar_out_0_a_valid; // @[Xbar.scala:216:19]
assign tlSlaveXbar_auto_anon_out_a_valid = tlSlaveXbar_anonOut_a_valid; // @[Xbar.scala:74:9]
wire [2:0] tlSlaveXbar_out_0_a_bits_opcode; // @[Xbar.scala:216:19]
assign tlSlaveXbar_auto_anon_out_a_bits_opcode = tlSlaveXbar_anonOut_a_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] tlSlaveXbar_out_0_a_bits_param; // @[Xbar.scala:216:19]
assign tlSlaveXbar_auto_anon_out_a_bits_param = tlSlaveXbar_anonOut_a_bits_param; // @[Xbar.scala:74:9]
wire [2:0] tlSlaveXbar_out_0_a_bits_size; // @[Xbar.scala:216:19]
assign tlSlaveXbar_auto_anon_out_a_bits_size = tlSlaveXbar_anonOut_a_bits_size; // @[Xbar.scala:74:9]
wire [7:0] tlSlaveXbar_out_0_a_bits_source; // @[Xbar.scala:216:19]
assign tlSlaveXbar_auto_anon_out_a_bits_source = tlSlaveXbar_anonOut_a_bits_source; // @[Xbar.scala:74:9]
wire [31:0] tlSlaveXbar_out_0_a_bits_address; // @[Xbar.scala:216:19]
assign tlSlaveXbar_auto_anon_out_a_bits_address = tlSlaveXbar_anonOut_a_bits_address; // @[Xbar.scala:74:9]
wire [7:0] tlSlaveXbar_out_0_a_bits_mask; // @[Xbar.scala:216:19]
assign tlSlaveXbar_auto_anon_out_a_bits_mask = tlSlaveXbar_anonOut_a_bits_mask; // @[Xbar.scala:74:9]
wire [63:0] tlSlaveXbar_out_0_a_bits_data; // @[Xbar.scala:216:19]
assign tlSlaveXbar_auto_anon_out_a_bits_data = tlSlaveXbar_anonOut_a_bits_data; // @[Xbar.scala:74:9]
wire tlSlaveXbar_out_0_a_bits_corrupt; // @[Xbar.scala:216:19]
assign tlSlaveXbar_auto_anon_out_a_bits_corrupt = tlSlaveXbar_anonOut_a_bits_corrupt; // @[Xbar.scala:74:9]
wire tlSlaveXbar_out_0_d_ready; // @[Xbar.scala:216:19]
assign tlSlaveXbar_auto_anon_out_d_ready = tlSlaveXbar_anonOut_d_ready; // @[Xbar.scala:74:9]
wire tlSlaveXbar_out_0_d_valid = tlSlaveXbar_anonOut_d_valid; // @[Xbar.scala:216:19]
wire [2:0] tlSlaveXbar_out_0_d_bits_opcode = tlSlaveXbar_anonOut_d_bits_opcode; // @[Xbar.scala:216:19]
wire [2:0] tlSlaveXbar_out_0_d_bits_size = tlSlaveXbar_anonOut_d_bits_size; // @[Xbar.scala:216:19]
wire [7:0] tlSlaveXbar_out_0_d_bits_source = tlSlaveXbar_anonOut_d_bits_source; // @[Xbar.scala:216:19]
wire [63:0] tlSlaveXbar_out_0_d_bits_data = tlSlaveXbar_anonOut_d_bits_data; // @[Xbar.scala:216:19]
wire tlSlaveXbar_in_0_a_ready; // @[Xbar.scala:159:18]
assign tlSlaveXbar_auto_anon_in_a_ready = tlSlaveXbar_anonIn_a_ready; // @[Xbar.scala:74:9]
wire tlSlaveXbar_in_0_a_valid = tlSlaveXbar_anonIn_a_valid; // @[Xbar.scala:159:18]
wire [2:0] tlSlaveXbar_in_0_a_bits_opcode = tlSlaveXbar_anonIn_a_bits_opcode; // @[Xbar.scala:159:18]
wire [2:0] tlSlaveXbar_in_0_a_bits_param = tlSlaveXbar_anonIn_a_bits_param; // @[Xbar.scala:159:18]
wire [2:0] tlSlaveXbar_in_0_a_bits_size = tlSlaveXbar_anonIn_a_bits_size; // @[Xbar.scala:159:18]
wire [7:0] tlSlaveXbar__in_0_a_bits_source_T = tlSlaveXbar_anonIn_a_bits_source; // @[Xbar.scala:166:55]
wire [31:0] tlSlaveXbar_in_0_a_bits_address = tlSlaveXbar_anonIn_a_bits_address; // @[Xbar.scala:159:18]
wire [7:0] tlSlaveXbar_in_0_a_bits_mask = tlSlaveXbar_anonIn_a_bits_mask; // @[Xbar.scala:159:18]
wire [63:0] tlSlaveXbar_in_0_a_bits_data = tlSlaveXbar_anonIn_a_bits_data; // @[Xbar.scala:159:18]
wire tlSlaveXbar_in_0_a_bits_corrupt = tlSlaveXbar_anonIn_a_bits_corrupt; // @[Xbar.scala:159:18]
wire tlSlaveXbar_in_0_d_ready = tlSlaveXbar_anonIn_d_ready; // @[Xbar.scala:159:18]
wire tlSlaveXbar_in_0_d_valid; // @[Xbar.scala:159:18]
assign tlSlaveXbar_auto_anon_in_d_valid = tlSlaveXbar_anonIn_d_valid; // @[Xbar.scala:74:9]
wire [2:0] tlSlaveXbar_in_0_d_bits_opcode; // @[Xbar.scala:159:18]
assign tlSlaveXbar_auto_anon_in_d_bits_opcode = tlSlaveXbar_anonIn_d_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] tlSlaveXbar_in_0_d_bits_size; // @[Xbar.scala:159:18]
assign tlSlaveXbar_auto_anon_in_d_bits_size = tlSlaveXbar_anonIn_d_bits_size; // @[Xbar.scala:74:9]
wire [7:0] tlSlaveXbar__anonIn_d_bits_source_T; // @[Xbar.scala:156:69]
assign tlSlaveXbar_auto_anon_in_d_bits_source = tlSlaveXbar_anonIn_d_bits_source; // @[Xbar.scala:74:9]
wire [63:0] tlSlaveXbar_in_0_d_bits_data; // @[Xbar.scala:159:18]
assign tlSlaveXbar_auto_anon_in_d_bits_data = tlSlaveXbar_anonIn_d_bits_data; // @[Xbar.scala:74:9]
wire tlSlaveXbar_portsAOI_filtered_0_ready; // @[Xbar.scala:352:24]
assign tlSlaveXbar_anonIn_a_ready = tlSlaveXbar_in_0_a_ready; // @[Xbar.scala:159:18]
wire tlSlaveXbar__portsAOI_filtered_0_valid_T_1 = tlSlaveXbar_in_0_a_valid; // @[Xbar.scala:159:18, :355:40]
wire [2:0] tlSlaveXbar_portsAOI_filtered_0_bits_opcode = tlSlaveXbar_in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24]
wire [2:0] tlSlaveXbar_portsAOI_filtered_0_bits_param = tlSlaveXbar_in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24]
wire [2:0] tlSlaveXbar_portsAOI_filtered_0_bits_size = tlSlaveXbar_in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24]
wire [7:0] tlSlaveXbar_portsAOI_filtered_0_bits_source = tlSlaveXbar_in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24]
wire [31:0] tlSlaveXbar__requestAIO_T = tlSlaveXbar_in_0_a_bits_address; // @[Xbar.scala:159:18]
wire [31:0] tlSlaveXbar_portsAOI_filtered_0_bits_address = tlSlaveXbar_in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24]
wire [7:0] tlSlaveXbar_portsAOI_filtered_0_bits_mask = tlSlaveXbar_in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24]
wire [63:0] tlSlaveXbar_portsAOI_filtered_0_bits_data = tlSlaveXbar_in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24]
wire tlSlaveXbar_portsAOI_filtered_0_bits_corrupt = tlSlaveXbar_in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24]
wire tlSlaveXbar_portsDIO_filtered_0_ready = tlSlaveXbar_in_0_d_ready; // @[Xbar.scala:159:18, :352:24]
wire tlSlaveXbar_portsDIO_filtered_0_valid; // @[Xbar.scala:352:24]
assign tlSlaveXbar_anonIn_d_valid = tlSlaveXbar_in_0_d_valid; // @[Xbar.scala:159:18]
wire [2:0] tlSlaveXbar_portsDIO_filtered_0_bits_opcode; // @[Xbar.scala:352:24]
assign tlSlaveXbar_anonIn_d_bits_opcode = tlSlaveXbar_in_0_d_bits_opcode; // @[Xbar.scala:159:18]
wire [2:0] tlSlaveXbar_portsDIO_filtered_0_bits_size; // @[Xbar.scala:352:24]
assign tlSlaveXbar_anonIn_d_bits_size = tlSlaveXbar_in_0_d_bits_size; // @[Xbar.scala:159:18]
wire [7:0] tlSlaveXbar_portsDIO_filtered_0_bits_source; // @[Xbar.scala:352:24]
assign tlSlaveXbar__anonIn_d_bits_source_T = tlSlaveXbar_in_0_d_bits_source; // @[Xbar.scala:156:69, :159:18]
wire [63:0] tlSlaveXbar_portsDIO_filtered_0_bits_data; // @[Xbar.scala:352:24]
assign tlSlaveXbar_anonIn_d_bits_data = tlSlaveXbar_in_0_d_bits_data; // @[Xbar.scala:159:18]
assign tlSlaveXbar_in_0_a_bits_source = tlSlaveXbar__in_0_a_bits_source_T; // @[Xbar.scala:159:18, :166:55]
assign tlSlaveXbar_anonIn_d_bits_source = tlSlaveXbar__anonIn_d_bits_source_T; // @[Xbar.scala:156:69]
assign tlSlaveXbar_portsAOI_filtered_0_ready = tlSlaveXbar_out_0_a_ready; // @[Xbar.scala:216:19, :352:24]
wire tlSlaveXbar_portsAOI_filtered_0_valid; // @[Xbar.scala:352:24]
assign tlSlaveXbar_anonOut_a_valid = tlSlaveXbar_out_0_a_valid; // @[Xbar.scala:216:19]
assign tlSlaveXbar_anonOut_a_bits_opcode = tlSlaveXbar_out_0_a_bits_opcode; // @[Xbar.scala:216:19]
assign tlSlaveXbar_anonOut_a_bits_param = tlSlaveXbar_out_0_a_bits_param; // @[Xbar.scala:216:19]
assign tlSlaveXbar_anonOut_a_bits_size = tlSlaveXbar_out_0_a_bits_size; // @[Xbar.scala:216:19]
assign tlSlaveXbar_anonOut_a_bits_source = tlSlaveXbar_out_0_a_bits_source; // @[Xbar.scala:216:19]
assign tlSlaveXbar_anonOut_a_bits_address = tlSlaveXbar_out_0_a_bits_address; // @[Xbar.scala:216:19]
assign tlSlaveXbar_anonOut_a_bits_mask = tlSlaveXbar_out_0_a_bits_mask; // @[Xbar.scala:216:19]
assign tlSlaveXbar_anonOut_a_bits_data = tlSlaveXbar_out_0_a_bits_data; // @[Xbar.scala:216:19]
assign tlSlaveXbar_anonOut_a_bits_corrupt = tlSlaveXbar_out_0_a_bits_corrupt; // @[Xbar.scala:216:19]
assign tlSlaveXbar_anonOut_d_ready = tlSlaveXbar_out_0_d_ready; // @[Xbar.scala:216:19]
wire tlSlaveXbar__portsDIO_filtered_0_valid_T_1 = tlSlaveXbar_out_0_d_valid; // @[Xbar.scala:216:19, :355:40]
assign tlSlaveXbar_portsDIO_filtered_0_bits_opcode = tlSlaveXbar_out_0_d_bits_opcode; // @[Xbar.scala:216:19, :352:24]
assign tlSlaveXbar_portsDIO_filtered_0_bits_size = tlSlaveXbar_out_0_d_bits_size; // @[Xbar.scala:216:19, :352:24]
wire [7:0] tlSlaveXbar__requestDOI_uncommonBits_T = tlSlaveXbar_out_0_d_bits_source; // @[Xbar.scala:216:19]
assign tlSlaveXbar_portsDIO_filtered_0_bits_source = tlSlaveXbar_out_0_d_bits_source; // @[Xbar.scala:216:19, :352:24]
assign tlSlaveXbar_portsDIO_filtered_0_bits_data = tlSlaveXbar_out_0_d_bits_data; // @[Xbar.scala:216:19, :352:24]
wire [32:0] tlSlaveXbar__requestAIO_T_1 = {1'h0, tlSlaveXbar__requestAIO_T}; // @[Parameters.scala:137:{31,41}]
wire [7:0] tlSlaveXbar_requestDOI_uncommonBits = tlSlaveXbar__requestDOI_uncommonBits_T; // @[Parameters.scala:52:{29,56}]
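// Beat-count decode: ((~(13'h3F << size)) & 6'h3F) >> 3 yields (beats - 1) on the 64-bit data
// bus; the count is forced to zero when the opcode carries no payload (A channel: opcode[2]
// set means no data; D channel: opcode[0] clear means no data).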
wire [12:0] tlSlaveXbar__beatsAI_decode_T = 13'h3F << tlSlaveXbar_in_0_a_bits_size; // @[package.scala:243:71]
wire [5:0] tlSlaveXbar__beatsAI_decode_T_1 = tlSlaveXbar__beatsAI_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] tlSlaveXbar__beatsAI_decode_T_2 = ~tlSlaveXbar__beatsAI_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] tlSlaveXbar_beatsAI_decode = tlSlaveXbar__beatsAI_decode_T_2[5:3]; // @[package.scala:243:46]
wire tlSlaveXbar__beatsAI_opdata_T = tlSlaveXbar_in_0_a_bits_opcode[2]; // @[Xbar.scala:159:18]
wire tlSlaveXbar_beatsAI_opdata = ~tlSlaveXbar__beatsAI_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] tlSlaveXbar_beatsAI_0 = tlSlaveXbar_beatsAI_opdata ? tlSlaveXbar_beatsAI_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
wire [12:0] tlSlaveXbar__beatsDO_decode_T = 13'h3F << tlSlaveXbar_out_0_d_bits_size; // @[package.scala:243:71]
wire [5:0] tlSlaveXbar__beatsDO_decode_T_1 = tlSlaveXbar__beatsDO_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] tlSlaveXbar__beatsDO_decode_T_2 = ~tlSlaveXbar__beatsDO_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] tlSlaveXbar_beatsDO_decode = tlSlaveXbar__beatsDO_decode_T_2[5:3]; // @[package.scala:243:46]
wire tlSlaveXbar_beatsDO_opdata = tlSlaveXbar_out_0_d_bits_opcode[0]; // @[Xbar.scala:216:19]
wire [2:0] tlSlaveXbar_beatsDO_0 = tlSlaveXbar_beatsDO_opdata ? tlSlaveXbar_beatsDO_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
assign tlSlaveXbar_in_0_a_ready = tlSlaveXbar_portsAOI_filtered_0_ready; // @[Xbar.scala:159:18, :352:24]
assign tlSlaveXbar_out_0_a_valid = tlSlaveXbar_portsAOI_filtered_0_valid; // @[Xbar.scala:216:19, :352:24]
assign tlSlaveXbar_out_0_a_bits_opcode = tlSlaveXbar_portsAOI_filtered_0_bits_opcode; // @[Xbar.scala:216:19, :352:24]
assign tlSlaveXbar_out_0_a_bits_param = tlSlaveXbar_portsAOI_filtered_0_bits_param; // @[Xbar.scala:216:19, :352:24]
assign tlSlaveXbar_out_0_a_bits_size = tlSlaveXbar_portsAOI_filtered_0_bits_size; // @[Xbar.scala:216:19, :352:24]
assign tlSlaveXbar_out_0_a_bits_source = tlSlaveXbar_portsAOI_filtered_0_bits_source; // @[Xbar.scala:216:19, :352:24]
assign tlSlaveXbar_out_0_a_bits_address = tlSlaveXbar_portsAOI_filtered_0_bits_address; // @[Xbar.scala:216:19, :352:24]
assign tlSlaveXbar_out_0_a_bits_mask = tlSlaveXbar_portsAOI_filtered_0_bits_mask; // @[Xbar.scala:216:19, :352:24]
assign tlSlaveXbar_out_0_a_bits_data = tlSlaveXbar_portsAOI_filtered_0_bits_data; // @[Xbar.scala:216:19, :352:24]
assign tlSlaveXbar_out_0_a_bits_corrupt = tlSlaveXbar_portsAOI_filtered_0_bits_corrupt; // @[Xbar.scala:216:19, :352:24]
assign tlSlaveXbar_portsAOI_filtered_0_valid = tlSlaveXbar__portsAOI_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40]
assign tlSlaveXbar_out_0_d_ready = tlSlaveXbar_portsDIO_filtered_0_ready; // @[Xbar.scala:216:19, :352:24]
assign tlSlaveXbar_in_0_d_valid = tlSlaveXbar_portsDIO_filtered_0_valid; // @[Xbar.scala:159:18, :352:24]
assign tlSlaveXbar_in_0_d_bits_opcode = tlSlaveXbar_portsDIO_filtered_0_bits_opcode; // @[Xbar.scala:159:18, :352:24]
assign tlSlaveXbar_in_0_d_bits_size = tlSlaveXbar_portsDIO_filtered_0_bits_size; // @[Xbar.scala:159:18, :352:24]
assign tlSlaveXbar_in_0_d_bits_source = tlSlaveXbar_portsDIO_filtered_0_bits_source; // @[Xbar.scala:159:18, :352:24]
assign tlSlaveXbar_in_0_d_bits_data = tlSlaveXbar_portsDIO_filtered_0_bits_data; // @[Xbar.scala:159:18, :352:24]
assign tlSlaveXbar_portsDIO_filtered_0_valid = tlSlaveXbar__portsDIO_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40]
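// BundleBridge broadcasts: the hartid input fans out through 'broadcast' to hartIdSinkNode and
// a second consumer, while broadcast_2 forwards the breakpoint-watch (bpwatch) bundle.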
wire hartidOut; // @[MixedNode.scala:542:17]
wire broadcast_nodeIn = broadcast_auto_in; // @[MixedNode.scala:551:17]
wire broadcast_x1_nodeOut; // @[MixedNode.scala:542:17]
wire broadcast_nodeOut; // @[MixedNode.scala:542:17]
wire broadcast_auto_out_1; // @[BundleBridgeNexus.scala:20:9]
wire broadcast_auto_out_0; // @[BundleBridgeNexus.scala:20:9]
wire hartIdSinkNodeIn = broadcast_auto_out_0; // @[MixedNode.scala:551:17]
assign broadcast_nodeOut = broadcast_nodeIn; // @[MixedNode.scala:542:17, :551:17]
assign broadcast_x1_nodeOut = broadcast_nodeIn; // @[MixedNode.scala:542:17, :551:17]
assign broadcast_auto_out_0 = broadcast_nodeOut; // @[MixedNode.scala:542:17]
assign broadcast_auto_out_1 = broadcast_x1_nodeOut; // @[MixedNode.scala:542:17]
wire bpwatchSourceNodeOut_0_valid_0; // @[MixedNode.scala:542:17]
wire broadcast_2_nodeIn_0_valid_0 = broadcast_2_auto_in_0_valid_0; // @[MixedNode.scala:551:17]
wire [2:0] bpwatchSourceNodeOut_0_action; // @[MixedNode.scala:542:17]
wire [2:0] broadcast_2_nodeIn_0_action = broadcast_2_auto_in_0_action; // @[MixedNode.scala:551:17]
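// widget: a TileLink width adapter whose inward and outward data widths both equal 64 bits
// here, so its A and D channels (including the AMBA prot user bits) are wired straight through.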
wire widget_anonIn_a_ready; // @[MixedNode.scala:551:17]
wire widget_anonIn_a_valid = widget_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonIn_a_bits_opcode = widget_auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonIn_a_bits_param = widget_auto_anon_in_a_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_anonIn_a_bits_size = widget_auto_anon_in_a_bits_size; // @[WidthWidget.scala:27:9]
wire [31:0] widget_anonIn_a_bits_address = widget_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire widget_anonIn_a_bits_user_amba_prot_bufferable = widget_auto_anon_in_a_bits_user_amba_prot_bufferable; // @[WidthWidget.scala:27:9]
wire widget_anonIn_a_bits_user_amba_prot_modifiable = widget_auto_anon_in_a_bits_user_amba_prot_modifiable; // @[WidthWidget.scala:27:9]
wire widget_anonIn_a_bits_user_amba_prot_readalloc = widget_auto_anon_in_a_bits_user_amba_prot_readalloc; // @[WidthWidget.scala:27:9]
wire widget_anonIn_a_bits_user_amba_prot_writealloc = widget_auto_anon_in_a_bits_user_amba_prot_writealloc; // @[WidthWidget.scala:27:9]
wire widget_anonIn_a_bits_user_amba_prot_privileged = widget_auto_anon_in_a_bits_user_amba_prot_privileged; // @[WidthWidget.scala:27:9]
wire [7:0] widget_anonIn_a_bits_mask = widget_auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [63:0] widget_anonIn_a_bits_data = widget_auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9]
wire widget_anonIn_d_ready = widget_auto_anon_in_d_ready; // @[WidthWidget.scala:27:9]
wire widget_anonIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] widget_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] widget_anonIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] widget_anonIn_d_bits_size; // @[MixedNode.scala:551:17]
wire widget_anonIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire widget_anonIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] widget_anonIn_d_bits_data; // @[MixedNode.scala:551:17]
wire widget_anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire widget_anonOut_a_ready = widget_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9]
wire widget_anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] widget_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] widget_anonOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] widget_anonOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [31:0] widget_anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire widget_anonOut_a_bits_user_amba_prot_bufferable; // @[MixedNode.scala:542:17]
wire widget_anonOut_a_bits_user_amba_prot_modifiable; // @[MixedNode.scala:542:17]
wire widget_anonOut_a_bits_user_amba_prot_readalloc; // @[MixedNode.scala:542:17]
wire widget_anonOut_a_bits_user_amba_prot_writealloc; // @[MixedNode.scala:542:17]
wire widget_anonOut_a_bits_user_amba_prot_privileged; // @[MixedNode.scala:542:17]
wire [7:0] widget_anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] widget_anonOut_a_bits_data; // @[MixedNode.scala:542:17]
wire widget_anonOut_d_ready; // @[MixedNode.scala:542:17]
wire widget_anonOut_d_valid = widget_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9]
wire [2:0] widget_anonOut_d_bits_opcode = widget_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] widget_anonOut_d_bits_param = widget_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_anonOut_d_bits_size = widget_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9]
wire widget_anonOut_d_bits_sink = widget_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9]
wire widget_anonOut_d_bits_denied = widget_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [63:0] widget_anonOut_d_bits_data = widget_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9]
wire widget_anonOut_d_bits_corrupt = widget_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_a_ready; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_in_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] widget_auto_anon_in_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_in_d_bits_size; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_bits_sink; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [63:0] widget_auto_anon_in_d_bits_data; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_in_d_valid; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_bits_user_amba_prot_bufferable; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_bits_user_amba_prot_modifiable; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_bits_user_amba_prot_readalloc; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_bits_user_amba_prot_writealloc; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_bits_user_amba_prot_privileged; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_out_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] widget_auto_anon_out_a_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_auto_anon_out_a_bits_size; // @[WidthWidget.scala:27:9]
wire [31:0] widget_auto_anon_out_a_bits_address; // @[WidthWidget.scala:27:9]
wire [7:0] widget_auto_anon_out_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [63:0] widget_auto_anon_out_a_bits_data; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_a_valid; // @[WidthWidget.scala:27:9]
wire widget_auto_anon_out_d_ready; // @[WidthWidget.scala:27:9]
assign widget_anonIn_a_ready = widget_anonOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_out_a_valid = widget_anonOut_a_valid; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_opcode = widget_anonOut_a_bits_opcode; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_param = widget_anonOut_a_bits_param; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_size = widget_anonOut_a_bits_size; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_address = widget_anonOut_a_bits_address; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_user_amba_prot_bufferable = widget_anonOut_a_bits_user_amba_prot_bufferable; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_user_amba_prot_modifiable = widget_anonOut_a_bits_user_amba_prot_modifiable; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_user_amba_prot_readalloc = widget_anonOut_a_bits_user_amba_prot_readalloc; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_user_amba_prot_writealloc = widget_anonOut_a_bits_user_amba_prot_writealloc; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_user_amba_prot_privileged = widget_anonOut_a_bits_user_amba_prot_privileged; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_mask = widget_anonOut_a_bits_mask; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_a_bits_data = widget_anonOut_a_bits_data; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_out_d_ready = widget_anonOut_d_ready; // @[WidthWidget.scala:27:9]
assign widget_anonIn_d_valid = widget_anonOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_opcode = widget_anonOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_param = widget_anonOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_size = widget_anonOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_sink = widget_anonOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_denied = widget_anonOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_data = widget_anonOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonIn_d_bits_corrupt = widget_anonOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_in_a_ready = widget_anonIn_a_ready; // @[WidthWidget.scala:27:9]
assign widget_anonOut_a_valid = widget_anonIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_opcode = widget_anonIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_param = widget_anonIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_size = widget_anonIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_address = widget_anonIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_user_amba_prot_bufferable = widget_anonIn_a_bits_user_amba_prot_bufferable; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_user_amba_prot_modifiable = widget_anonIn_a_bits_user_amba_prot_modifiable; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_user_amba_prot_readalloc = widget_anonIn_a_bits_user_amba_prot_readalloc; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_user_amba_prot_writealloc = widget_anonIn_a_bits_user_amba_prot_writealloc; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_user_amba_prot_privileged = widget_anonIn_a_bits_user_amba_prot_privileged; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_mask = widget_anonIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_a_bits_data = widget_anonIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign widget_anonOut_d_ready = widget_anonIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_auto_anon_in_d_valid = widget_anonIn_d_valid; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_opcode = widget_anonIn_d_bits_opcode; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_param = widget_anonIn_d_bits_param; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_size = widget_anonIn_d_bits_size; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_sink = widget_anonIn_d_bits_sink; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_denied = widget_anonIn_d_bits_denied; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_data = widget_anonIn_d_bits_data; // @[WidthWidget.scala:27:9]
assign widget_auto_anon_in_d_bits_corrupt = widget_anonIn_d_bits_corrupt; // @[WidthWidget.scala:27:9]
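  // TLWidthWidget "widget_1": pass-through for the second TileLink master
  // (a read-only client -- its A channel carries only an address plus
  // read/write-allocate hints, likely the instruction-fetch path), feeding
  // input 1 of tlMasterXbar.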
wire widget_1_anonIn_a_ready; // @[MixedNode.scala:551:17]
wire widget_1_anonIn_a_valid = widget_1_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire [31:0] widget_1_anonIn_a_bits_address = widget_1_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire widget_1_anonIn_a_bits_user_amba_prot_readalloc = widget_1_auto_anon_in_a_bits_user_amba_prot_readalloc; // @[WidthWidget.scala:27:9]
wire widget_1_anonIn_a_bits_user_amba_prot_writealloc = widget_1_auto_anon_in_a_bits_user_amba_prot_writealloc; // @[WidthWidget.scala:27:9]
wire widget_1_anonIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] widget_1_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] widget_1_anonIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] widget_1_anonIn_d_bits_size; // @[MixedNode.scala:551:17]
wire widget_1_anonIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire widget_1_anonIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] widget_1_anonIn_d_bits_data; // @[MixedNode.scala:551:17]
wire widget_1_anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire widget_1_anonOut_a_ready = widget_1_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9]
wire widget_1_anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [31:0] widget_1_anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire widget_1_anonOut_a_bits_user_amba_prot_readalloc; // @[MixedNode.scala:542:17]
wire widget_1_anonOut_a_bits_user_amba_prot_writealloc; // @[MixedNode.scala:542:17]
wire widget_1_anonOut_d_valid = widget_1_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9]
wire [2:0] widget_1_anonOut_d_bits_opcode = widget_1_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] widget_1_anonOut_d_bits_param = widget_1_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_1_anonOut_d_bits_size = widget_1_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9]
wire widget_1_anonOut_d_bits_sink = widget_1_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9]
wire widget_1_anonOut_d_bits_denied = widget_1_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [63:0] widget_1_anonOut_d_bits_data = widget_1_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9]
wire widget_1_anonOut_d_bits_corrupt = widget_1_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_in_a_ready; // @[WidthWidget.scala:27:9]
wire [2:0] widget_1_auto_anon_in_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] widget_1_auto_anon_in_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] widget_1_auto_anon_in_d_bits_size; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_in_d_bits_sink; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_in_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [63:0] widget_1_auto_anon_in_d_bits_data; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_in_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_in_d_valid; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_out_a_bits_user_amba_prot_readalloc; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_out_a_bits_user_amba_prot_writealloc; // @[WidthWidget.scala:27:9]
wire [31:0] widget_1_auto_anon_out_a_bits_address; // @[WidthWidget.scala:27:9]
wire widget_1_auto_anon_out_a_valid; // @[WidthWidget.scala:27:9]
assign widget_1_anonIn_a_ready = widget_1_anonOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_auto_anon_out_a_valid = widget_1_anonOut_a_valid; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_out_a_bits_address = widget_1_anonOut_a_bits_address; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_out_a_bits_user_amba_prot_readalloc = widget_1_anonOut_a_bits_user_amba_prot_readalloc; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_out_a_bits_user_amba_prot_writealloc = widget_1_anonOut_a_bits_user_amba_prot_writealloc; // @[WidthWidget.scala:27:9]
assign widget_1_anonIn_d_valid = widget_1_anonOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonIn_d_bits_opcode = widget_1_anonOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonIn_d_bits_param = widget_1_anonOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonIn_d_bits_size = widget_1_anonOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonIn_d_bits_sink = widget_1_anonOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonIn_d_bits_denied = widget_1_anonOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonIn_d_bits_data = widget_1_anonOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonIn_d_bits_corrupt = widget_1_anonOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_auto_anon_in_a_ready = widget_1_anonIn_a_ready; // @[WidthWidget.scala:27:9]
assign widget_1_anonOut_a_valid = widget_1_anonIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonOut_a_bits_address = widget_1_anonIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonOut_a_bits_user_amba_prot_readalloc = widget_1_anonIn_a_bits_user_amba_prot_readalloc; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_anonOut_a_bits_user_amba_prot_writealloc = widget_1_anonIn_a_bits_user_amba_prot_writealloc; // @[MixedNode.scala:542:17, :551:17]
assign widget_1_auto_anon_in_d_valid = widget_1_anonIn_d_valid; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_in_d_bits_opcode = widget_1_anonIn_d_bits_opcode; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_in_d_bits_param = widget_1_anonIn_d_bits_param; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_in_d_bits_size = widget_1_anonIn_d_bits_size; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_in_d_bits_sink = widget_1_anonIn_d_bits_sink; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_in_d_bits_denied = widget_1_anonIn_d_bits_denied; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_in_d_bits_data = widget_1_anonIn_d_bits_data; // @[WidthWidget.scala:27:9]
assign widget_1_auto_anon_in_d_bits_corrupt = widget_1_anonIn_d_bits_corrupt; // @[WidthWidget.scala:27:9]
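  // TLBuffer "buffer": in this elaboration the buffer adds no pipeline stage,
  // so it is plain pass-through wiring from tlOtherMastersNode (the master
  // crossbar output) to the tile's auto_buffer_out master port.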
wire buffer_nodeIn_a_ready; // @[MixedNode.scala:551:17]
wire tlOtherMastersNodeOut_a_ready = buffer_auto_in_a_ready; // @[Buffer.scala:40:9]
wire tlOtherMastersNodeOut_a_valid; // @[MixedNode.scala:542:17]
wire buffer_nodeIn_a_valid = buffer_auto_in_a_valid; // @[Buffer.scala:40:9]
wire [2:0] tlOtherMastersNodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] buffer_nodeIn_a_bits_opcode = buffer_auto_in_a_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] tlOtherMastersNodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] buffer_nodeIn_a_bits_param = buffer_auto_in_a_bits_param; // @[Buffer.scala:40:9]
wire [3:0] tlOtherMastersNodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [3:0] buffer_nodeIn_a_bits_size = buffer_auto_in_a_bits_size; // @[Buffer.scala:40:9]
wire tlOtherMastersNodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire buffer_nodeIn_a_bits_source = buffer_auto_in_a_bits_source; // @[Buffer.scala:40:9]
wire [31:0] tlOtherMastersNodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [31:0] buffer_nodeIn_a_bits_address = buffer_auto_in_a_bits_address; // @[Buffer.scala:40:9]
wire tlOtherMastersNodeOut_a_bits_user_amba_prot_bufferable; // @[MixedNode.scala:542:17]
wire buffer_nodeIn_a_bits_user_amba_prot_bufferable = buffer_auto_in_a_bits_user_amba_prot_bufferable; // @[Buffer.scala:40:9]
wire tlOtherMastersNodeOut_a_bits_user_amba_prot_modifiable; // @[MixedNode.scala:542:17]
wire buffer_nodeIn_a_bits_user_amba_prot_modifiable = buffer_auto_in_a_bits_user_amba_prot_modifiable; // @[Buffer.scala:40:9]
wire tlOtherMastersNodeOut_a_bits_user_amba_prot_readalloc; // @[MixedNode.scala:542:17]
wire buffer_nodeIn_a_bits_user_amba_prot_readalloc = buffer_auto_in_a_bits_user_amba_prot_readalloc; // @[Buffer.scala:40:9]
wire tlOtherMastersNodeOut_a_bits_user_amba_prot_writealloc; // @[MixedNode.scala:542:17]
wire buffer_nodeIn_a_bits_user_amba_prot_writealloc = buffer_auto_in_a_bits_user_amba_prot_writealloc; // @[Buffer.scala:40:9]
wire tlOtherMastersNodeOut_a_bits_user_amba_prot_privileged; // @[MixedNode.scala:542:17]
wire buffer_nodeIn_a_bits_user_amba_prot_privileged = buffer_auto_in_a_bits_user_amba_prot_privileged; // @[Buffer.scala:40:9]
wire tlOtherMastersNodeOut_a_bits_user_amba_prot_secure; // @[MixedNode.scala:542:17]
wire buffer_nodeIn_a_bits_user_amba_prot_secure = buffer_auto_in_a_bits_user_amba_prot_secure; // @[Buffer.scala:40:9]
wire tlOtherMastersNodeOut_a_bits_user_amba_prot_fetch; // @[MixedNode.scala:542:17]
wire buffer_nodeIn_a_bits_user_amba_prot_fetch = buffer_auto_in_a_bits_user_amba_prot_fetch; // @[Buffer.scala:40:9]
wire [7:0] tlOtherMastersNodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [7:0] buffer_nodeIn_a_bits_mask = buffer_auto_in_a_bits_mask; // @[Buffer.scala:40:9]
wire [63:0] tlOtherMastersNodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire [63:0] buffer_nodeIn_a_bits_data = buffer_auto_in_a_bits_data; // @[Buffer.scala:40:9]
wire tlOtherMastersNodeOut_d_ready; // @[MixedNode.scala:542:17]
wire buffer_nodeIn_d_ready = buffer_auto_in_d_ready; // @[Buffer.scala:40:9]
wire buffer_nodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] buffer_nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire tlOtherMastersNodeOut_d_valid = buffer_auto_in_d_valid; // @[Buffer.scala:40:9]
wire [1:0] buffer_nodeIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [2:0] tlOtherMastersNodeOut_d_bits_opcode = buffer_auto_in_d_bits_opcode; // @[Buffer.scala:40:9]
wire [3:0] buffer_nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [1:0] tlOtherMastersNodeOut_d_bits_param = buffer_auto_in_d_bits_param; // @[Buffer.scala:40:9]
wire buffer_nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [3:0] tlOtherMastersNodeOut_d_bits_size = buffer_auto_in_d_bits_size; // @[Buffer.scala:40:9]
wire buffer_nodeIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire tlOtherMastersNodeOut_d_bits_source = buffer_auto_in_d_bits_source; // @[Buffer.scala:40:9]
wire buffer_nodeIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire tlOtherMastersNodeOut_d_bits_sink = buffer_auto_in_d_bits_sink; // @[Buffer.scala:40:9]
wire [63:0] buffer_nodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire tlOtherMastersNodeOut_d_bits_denied = buffer_auto_in_d_bits_denied; // @[Buffer.scala:40:9]
wire buffer_nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire [63:0] tlOtherMastersNodeOut_d_bits_data = buffer_auto_in_d_bits_data; // @[Buffer.scala:40:9]
wire tlOtherMastersNodeOut_d_bits_corrupt = buffer_auto_in_d_bits_corrupt; // @[Buffer.scala:40:9]
wire buffer_nodeOut_a_ready = buffer_auto_out_a_ready; // @[Buffer.scala:40:9]
wire buffer_nodeOut_a_valid; // @[MixedNode.scala:542:17]
assign auto_buffer_out_a_valid_0 = buffer_auto_out_a_valid; // @[Buffer.scala:40:9]
wire [2:0] buffer_nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
assign auto_buffer_out_a_bits_opcode_0 = buffer_auto_out_a_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] buffer_nodeOut_a_bits_param; // @[MixedNode.scala:542:17]
assign auto_buffer_out_a_bits_param_0 = buffer_auto_out_a_bits_param; // @[Buffer.scala:40:9]
wire [3:0] buffer_nodeOut_a_bits_size; // @[MixedNode.scala:542:17]
assign auto_buffer_out_a_bits_size_0 = buffer_auto_out_a_bits_size; // @[Buffer.scala:40:9]
wire buffer_nodeOut_a_bits_source; // @[MixedNode.scala:542:17]
assign auto_buffer_out_a_bits_source_0 = buffer_auto_out_a_bits_source; // @[Buffer.scala:40:9]
wire [31:0] buffer_nodeOut_a_bits_address; // @[MixedNode.scala:542:17]
assign auto_buffer_out_a_bits_address_0 = buffer_auto_out_a_bits_address; // @[Buffer.scala:40:9]
wire buffer_nodeOut_a_bits_user_amba_prot_bufferable; // @[MixedNode.scala:542:17]
assign auto_buffer_out_a_bits_user_amba_prot_bufferable_0 = buffer_auto_out_a_bits_user_amba_prot_bufferable; // @[Buffer.scala:40:9]
wire buffer_nodeOut_a_bits_user_amba_prot_modifiable; // @[MixedNode.scala:542:17]
assign auto_buffer_out_a_bits_user_amba_prot_modifiable_0 = buffer_auto_out_a_bits_user_amba_prot_modifiable; // @[Buffer.scala:40:9]
wire buffer_nodeOut_a_bits_user_amba_prot_readalloc; // @[MixedNode.scala:542:17]
assign auto_buffer_out_a_bits_user_amba_prot_readalloc_0 = buffer_auto_out_a_bits_user_amba_prot_readalloc; // @[Buffer.scala:40:9]
wire buffer_nodeOut_a_bits_user_amba_prot_writealloc; // @[MixedNode.scala:542:17]
assign auto_buffer_out_a_bits_user_amba_prot_writealloc_0 = buffer_auto_out_a_bits_user_amba_prot_writealloc; // @[Buffer.scala:40:9]
wire buffer_nodeOut_a_bits_user_amba_prot_privileged; // @[MixedNode.scala:542:17]
assign auto_buffer_out_a_bits_user_amba_prot_privileged_0 = buffer_auto_out_a_bits_user_amba_prot_privileged; // @[Buffer.scala:40:9]
wire buffer_nodeOut_a_bits_user_amba_prot_secure; // @[MixedNode.scala:542:17]
assign auto_buffer_out_a_bits_user_amba_prot_secure_0 = buffer_auto_out_a_bits_user_amba_prot_secure; // @[Buffer.scala:40:9]
wire buffer_nodeOut_a_bits_user_amba_prot_fetch; // @[MixedNode.scala:542:17]
assign auto_buffer_out_a_bits_user_amba_prot_fetch_0 = buffer_auto_out_a_bits_user_amba_prot_fetch; // @[Buffer.scala:40:9]
wire [7:0] buffer_nodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
assign auto_buffer_out_a_bits_mask_0 = buffer_auto_out_a_bits_mask; // @[Buffer.scala:40:9]
wire [63:0] buffer_nodeOut_a_bits_data; // @[MixedNode.scala:542:17]
assign auto_buffer_out_a_bits_data_0 = buffer_auto_out_a_bits_data; // @[Buffer.scala:40:9]
wire buffer_nodeOut_d_ready; // @[MixedNode.scala:542:17]
assign auto_buffer_out_d_ready_0 = buffer_auto_out_d_ready; // @[Buffer.scala:40:9]
wire buffer_nodeOut_d_valid = buffer_auto_out_d_valid; // @[Buffer.scala:40:9]
wire [2:0] buffer_nodeOut_d_bits_opcode = buffer_auto_out_d_bits_opcode; // @[Buffer.scala:40:9]
wire [1:0] buffer_nodeOut_d_bits_param = buffer_auto_out_d_bits_param; // @[Buffer.scala:40:9]
wire [3:0] buffer_nodeOut_d_bits_size = buffer_auto_out_d_bits_size; // @[Buffer.scala:40:9]
wire buffer_nodeOut_d_bits_source = buffer_auto_out_d_bits_source; // @[Buffer.scala:40:9]
wire buffer_nodeOut_d_bits_sink = buffer_auto_out_d_bits_sink; // @[Buffer.scala:40:9]
wire buffer_nodeOut_d_bits_denied = buffer_auto_out_d_bits_denied; // @[Buffer.scala:40:9]
wire [63:0] buffer_nodeOut_d_bits_data = buffer_auto_out_d_bits_data; // @[Buffer.scala:40:9]
wire buffer_nodeOut_d_bits_corrupt = buffer_auto_out_d_bits_corrupt; // @[Buffer.scala:40:9]
assign buffer_nodeIn_a_ready = buffer_nodeOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign buffer_auto_out_a_valid = buffer_nodeOut_a_valid; // @[Buffer.scala:40:9]
assign buffer_auto_out_a_bits_opcode = buffer_nodeOut_a_bits_opcode; // @[Buffer.scala:40:9]
assign buffer_auto_out_a_bits_param = buffer_nodeOut_a_bits_param; // @[Buffer.scala:40:9]
assign buffer_auto_out_a_bits_size = buffer_nodeOut_a_bits_size; // @[Buffer.scala:40:9]
assign buffer_auto_out_a_bits_source = buffer_nodeOut_a_bits_source; // @[Buffer.scala:40:9]
assign buffer_auto_out_a_bits_address = buffer_nodeOut_a_bits_address; // @[Buffer.scala:40:9]
assign buffer_auto_out_a_bits_user_amba_prot_bufferable = buffer_nodeOut_a_bits_user_amba_prot_bufferable; // @[Buffer.scala:40:9]
assign buffer_auto_out_a_bits_user_amba_prot_modifiable = buffer_nodeOut_a_bits_user_amba_prot_modifiable; // @[Buffer.scala:40:9]
assign buffer_auto_out_a_bits_user_amba_prot_readalloc = buffer_nodeOut_a_bits_user_amba_prot_readalloc; // @[Buffer.scala:40:9]
assign buffer_auto_out_a_bits_user_amba_prot_writealloc = buffer_nodeOut_a_bits_user_amba_prot_writealloc; // @[Buffer.scala:40:9]
assign buffer_auto_out_a_bits_user_amba_prot_privileged = buffer_nodeOut_a_bits_user_amba_prot_privileged; // @[Buffer.scala:40:9]
assign buffer_auto_out_a_bits_user_amba_prot_secure = buffer_nodeOut_a_bits_user_amba_prot_secure; // @[Buffer.scala:40:9]
assign buffer_auto_out_a_bits_user_amba_prot_fetch = buffer_nodeOut_a_bits_user_amba_prot_fetch; // @[Buffer.scala:40:9]
assign buffer_auto_out_a_bits_mask = buffer_nodeOut_a_bits_mask; // @[Buffer.scala:40:9]
assign buffer_auto_out_a_bits_data = buffer_nodeOut_a_bits_data; // @[Buffer.scala:40:9]
assign buffer_auto_out_d_ready = buffer_nodeOut_d_ready; // @[Buffer.scala:40:9]
assign buffer_nodeIn_d_valid = buffer_nodeOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeIn_d_bits_opcode = buffer_nodeOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeIn_d_bits_param = buffer_nodeOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeIn_d_bits_size = buffer_nodeOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeIn_d_bits_source = buffer_nodeOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeIn_d_bits_sink = buffer_nodeOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeIn_d_bits_denied = buffer_nodeOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeIn_d_bits_data = buffer_nodeOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeIn_d_bits_corrupt = buffer_nodeOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign buffer_auto_in_a_ready = buffer_nodeIn_a_ready; // @[Buffer.scala:40:9]
assign buffer_nodeOut_a_valid = buffer_nodeIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_a_bits_opcode = buffer_nodeIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_a_bits_param = buffer_nodeIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_a_bits_size = buffer_nodeIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_a_bits_source = buffer_nodeIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_a_bits_address = buffer_nodeIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_a_bits_user_amba_prot_bufferable = buffer_nodeIn_a_bits_user_amba_prot_bufferable; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_a_bits_user_amba_prot_modifiable = buffer_nodeIn_a_bits_user_amba_prot_modifiable; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_a_bits_user_amba_prot_readalloc = buffer_nodeIn_a_bits_user_amba_prot_readalloc; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_a_bits_user_amba_prot_writealloc = buffer_nodeIn_a_bits_user_amba_prot_writealloc; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_a_bits_user_amba_prot_privileged = buffer_nodeIn_a_bits_user_amba_prot_privileged; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_a_bits_user_amba_prot_secure = buffer_nodeIn_a_bits_user_amba_prot_secure; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_a_bits_user_amba_prot_fetch = buffer_nodeIn_a_bits_user_amba_prot_fetch; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_a_bits_mask = buffer_nodeIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_a_bits_data = buffer_nodeIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign buffer_nodeOut_d_ready = buffer_nodeIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign buffer_auto_in_d_valid = buffer_nodeIn_d_valid; // @[Buffer.scala:40:9]
assign buffer_auto_in_d_bits_opcode = buffer_nodeIn_d_bits_opcode; // @[Buffer.scala:40:9]
assign buffer_auto_in_d_bits_param = buffer_nodeIn_d_bits_param; // @[Buffer.scala:40:9]
assign buffer_auto_in_d_bits_size = buffer_nodeIn_d_bits_size; // @[Buffer.scala:40:9]
assign buffer_auto_in_d_bits_source = buffer_nodeIn_d_bits_source; // @[Buffer.scala:40:9]
assign buffer_auto_in_d_bits_sink = buffer_nodeIn_d_bits_sink; // @[Buffer.scala:40:9]
assign buffer_auto_in_d_bits_denied = buffer_nodeIn_d_bits_denied; // @[Buffer.scala:40:9]
assign buffer_auto_in_d_bits_data = buffer_nodeIn_d_bits_data; // @[Buffer.scala:40:9]
assign buffer_auto_in_d_bits_corrupt = buffer_nodeIn_d_bits_corrupt; // @[Buffer.scala:40:9]
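  // TLBuffer "buffer_1": pass-through on the tile's TileLink slave port,
  // forwarding auto_buffer_in to slaveNode (and from there into tlSlaveXbar).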
wire buffer_1_nodeIn_a_ready; // @[MixedNode.scala:551:17]
assign auto_buffer_in_a_ready_0 = buffer_1_auto_in_a_ready; // @[Buffer.scala:40:9]
wire buffer_1_nodeIn_a_valid = buffer_1_auto_in_a_valid; // @[Buffer.scala:40:9]
wire [2:0] buffer_1_nodeIn_a_bits_opcode = buffer_1_auto_in_a_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] buffer_1_nodeIn_a_bits_param = buffer_1_auto_in_a_bits_param; // @[Buffer.scala:40:9]
wire [2:0] buffer_1_nodeIn_a_bits_size = buffer_1_auto_in_a_bits_size; // @[Buffer.scala:40:9]
wire [7:0] buffer_1_nodeIn_a_bits_source = buffer_1_auto_in_a_bits_source; // @[Buffer.scala:40:9]
wire [31:0] buffer_1_nodeIn_a_bits_address = buffer_1_auto_in_a_bits_address; // @[Buffer.scala:40:9]
wire [7:0] buffer_1_nodeIn_a_bits_mask = buffer_1_auto_in_a_bits_mask; // @[Buffer.scala:40:9]
wire [63:0] buffer_1_nodeIn_a_bits_data = buffer_1_auto_in_a_bits_data; // @[Buffer.scala:40:9]
wire buffer_1_nodeIn_a_bits_corrupt = buffer_1_auto_in_a_bits_corrupt; // @[Buffer.scala:40:9]
wire buffer_1_nodeIn_d_ready = buffer_1_auto_in_d_ready; // @[Buffer.scala:40:9]
wire buffer_1_nodeIn_d_valid; // @[MixedNode.scala:551:17]
assign auto_buffer_in_d_valid_0 = buffer_1_auto_in_d_valid; // @[Buffer.scala:40:9]
wire [2:0] buffer_1_nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
assign auto_buffer_in_d_bits_opcode_0 = buffer_1_auto_in_d_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] buffer_1_nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
assign auto_buffer_in_d_bits_size_0 = buffer_1_auto_in_d_bits_size; // @[Buffer.scala:40:9]
wire [7:0] buffer_1_nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
assign auto_buffer_in_d_bits_source_0 = buffer_1_auto_in_d_bits_source; // @[Buffer.scala:40:9]
wire [63:0] buffer_1_nodeIn_d_bits_data; // @[MixedNode.scala:551:17]
assign auto_buffer_in_d_bits_data_0 = buffer_1_auto_in_d_bits_data; // @[Buffer.scala:40:9]
wire slaveNodeIn_a_ready; // @[MixedNode.scala:551:17]
wire buffer_1_nodeOut_a_ready = buffer_1_auto_out_a_ready; // @[Buffer.scala:40:9]
wire buffer_1_nodeOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] buffer_1_nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire slaveNodeIn_a_valid = buffer_1_auto_out_a_valid; // @[Buffer.scala:40:9]
wire [2:0] buffer_1_nodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] slaveNodeIn_a_bits_opcode = buffer_1_auto_out_a_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] buffer_1_nodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [2:0] slaveNodeIn_a_bits_param = buffer_1_auto_out_a_bits_param; // @[Buffer.scala:40:9]
wire [7:0] buffer_1_nodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [2:0] slaveNodeIn_a_bits_size = buffer_1_auto_out_a_bits_size; // @[Buffer.scala:40:9]
wire [31:0] buffer_1_nodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] slaveNodeIn_a_bits_source = buffer_1_auto_out_a_bits_source; // @[Buffer.scala:40:9]
wire [7:0] buffer_1_nodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [31:0] slaveNodeIn_a_bits_address = buffer_1_auto_out_a_bits_address; // @[Buffer.scala:40:9]
wire [63:0] buffer_1_nodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire [7:0] slaveNodeIn_a_bits_mask = buffer_1_auto_out_a_bits_mask; // @[Buffer.scala:40:9]
wire buffer_1_nodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire [63:0] slaveNodeIn_a_bits_data = buffer_1_auto_out_a_bits_data; // @[Buffer.scala:40:9]
wire buffer_1_nodeOut_d_ready; // @[MixedNode.scala:542:17]
wire slaveNodeIn_a_bits_corrupt = buffer_1_auto_out_a_bits_corrupt; // @[Buffer.scala:40:9]
wire slaveNodeIn_d_ready = buffer_1_auto_out_d_ready; // @[Buffer.scala:40:9]
wire slaveNodeIn_d_valid; // @[MixedNode.scala:551:17]
wire buffer_1_nodeOut_d_valid = buffer_1_auto_out_d_valid; // @[Buffer.scala:40:9]
wire [2:0] slaveNodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [2:0] buffer_1_nodeOut_d_bits_opcode = buffer_1_auto_out_d_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] slaveNodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [2:0] buffer_1_nodeOut_d_bits_size = buffer_1_auto_out_d_bits_size; // @[Buffer.scala:40:9]
wire [7:0] slaveNodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [7:0] buffer_1_nodeOut_d_bits_source = buffer_1_auto_out_d_bits_source; // @[Buffer.scala:40:9]
wire [63:0] slaveNodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire [63:0] buffer_1_nodeOut_d_bits_data = buffer_1_auto_out_d_bits_data; // @[Buffer.scala:40:9]
assign buffer_1_nodeIn_a_ready = buffer_1_nodeOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign buffer_1_auto_out_a_valid = buffer_1_nodeOut_a_valid; // @[Buffer.scala:40:9]
assign buffer_1_auto_out_a_bits_opcode = buffer_1_nodeOut_a_bits_opcode; // @[Buffer.scala:40:9]
assign buffer_1_auto_out_a_bits_param = buffer_1_nodeOut_a_bits_param; // @[Buffer.scala:40:9]
assign buffer_1_auto_out_a_bits_size = buffer_1_nodeOut_a_bits_size; // @[Buffer.scala:40:9]
assign buffer_1_auto_out_a_bits_source = buffer_1_nodeOut_a_bits_source; // @[Buffer.scala:40:9]
assign buffer_1_auto_out_a_bits_address = buffer_1_nodeOut_a_bits_address; // @[Buffer.scala:40:9]
assign buffer_1_auto_out_a_bits_mask = buffer_1_nodeOut_a_bits_mask; // @[Buffer.scala:40:9]
assign buffer_1_auto_out_a_bits_data = buffer_1_nodeOut_a_bits_data; // @[Buffer.scala:40:9]
assign buffer_1_auto_out_a_bits_corrupt = buffer_1_nodeOut_a_bits_corrupt; // @[Buffer.scala:40:9]
assign buffer_1_auto_out_d_ready = buffer_1_nodeOut_d_ready; // @[Buffer.scala:40:9]
assign buffer_1_nodeIn_d_valid = buffer_1_nodeOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign buffer_1_nodeIn_d_bits_opcode = buffer_1_nodeOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign buffer_1_nodeIn_d_bits_size = buffer_1_nodeOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign buffer_1_nodeIn_d_bits_source = buffer_1_nodeOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign buffer_1_nodeIn_d_bits_data = buffer_1_nodeOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign buffer_1_auto_in_a_ready = buffer_1_nodeIn_a_ready; // @[Buffer.scala:40:9]
assign buffer_1_nodeOut_a_valid = buffer_1_nodeIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign buffer_1_nodeOut_a_bits_opcode = buffer_1_nodeIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign buffer_1_nodeOut_a_bits_param = buffer_1_nodeIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign buffer_1_nodeOut_a_bits_size = buffer_1_nodeIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign buffer_1_nodeOut_a_bits_source = buffer_1_nodeIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign buffer_1_nodeOut_a_bits_address = buffer_1_nodeIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign buffer_1_nodeOut_a_bits_mask = buffer_1_nodeIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign buffer_1_nodeOut_a_bits_data = buffer_1_nodeIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign buffer_1_nodeOut_a_bits_corrupt = buffer_1_nodeIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign buffer_1_nodeOut_d_ready = buffer_1_nodeIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign buffer_1_auto_in_d_valid = buffer_1_nodeIn_d_valid; // @[Buffer.scala:40:9]
assign buffer_1_auto_in_d_bits_opcode = buffer_1_nodeIn_d_bits_opcode; // @[Buffer.scala:40:9]
assign buffer_1_auto_in_d_bits_size = buffer_1_nodeIn_d_bits_size; // @[Buffer.scala:40:9]
assign buffer_1_auto_in_d_bits_source = buffer_1_nodeIn_d_bits_source; // @[Buffer.scala:40:9]
assign buffer_1_auto_in_d_bits_data = buffer_1_nodeIn_d_bits_data; // @[Buffer.scala:40:9]
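  // tlOtherMastersNode: identity wiring that hands the tlMasterXbar output
  // straight to the buffer feeding the tile's outward master port.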
wire tlOtherMastersNodeIn_a_ready = tlOtherMastersNodeOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
wire tlOtherMastersNodeIn_a_valid; // @[MixedNode.scala:551:17]
assign buffer_auto_in_a_valid = tlOtherMastersNodeOut_a_valid; // @[Buffer.scala:40:9]
wire [2:0] tlOtherMastersNodeIn_a_bits_opcode; // @[MixedNode.scala:551:17]
assign buffer_auto_in_a_bits_opcode = tlOtherMastersNodeOut_a_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] tlOtherMastersNodeIn_a_bits_param; // @[MixedNode.scala:551:17]
assign buffer_auto_in_a_bits_param = tlOtherMastersNodeOut_a_bits_param; // @[Buffer.scala:40:9]
wire [3:0] tlOtherMastersNodeIn_a_bits_size; // @[MixedNode.scala:551:17]
assign buffer_auto_in_a_bits_size = tlOtherMastersNodeOut_a_bits_size; // @[Buffer.scala:40:9]
wire tlOtherMastersNodeIn_a_bits_source; // @[MixedNode.scala:551:17]
assign buffer_auto_in_a_bits_source = tlOtherMastersNodeOut_a_bits_source; // @[Buffer.scala:40:9]
wire [31:0] tlOtherMastersNodeIn_a_bits_address; // @[MixedNode.scala:551:17]
assign buffer_auto_in_a_bits_address = tlOtherMastersNodeOut_a_bits_address; // @[Buffer.scala:40:9]
wire tlOtherMastersNodeIn_a_bits_user_amba_prot_bufferable; // @[MixedNode.scala:551:17]
assign buffer_auto_in_a_bits_user_amba_prot_bufferable = tlOtherMastersNodeOut_a_bits_user_amba_prot_bufferable; // @[Buffer.scala:40:9]
wire tlOtherMastersNodeIn_a_bits_user_amba_prot_modifiable; // @[MixedNode.scala:551:17]
assign buffer_auto_in_a_bits_user_amba_prot_modifiable = tlOtherMastersNodeOut_a_bits_user_amba_prot_modifiable; // @[Buffer.scala:40:9]
wire tlOtherMastersNodeIn_a_bits_user_amba_prot_readalloc; // @[MixedNode.scala:551:17]
assign buffer_auto_in_a_bits_user_amba_prot_readalloc = tlOtherMastersNodeOut_a_bits_user_amba_prot_readalloc; // @[Buffer.scala:40:9]
wire tlOtherMastersNodeIn_a_bits_user_amba_prot_writealloc; // @[MixedNode.scala:551:17]
assign buffer_auto_in_a_bits_user_amba_prot_writealloc = tlOtherMastersNodeOut_a_bits_user_amba_prot_writealloc; // @[Buffer.scala:40:9]
wire tlOtherMastersNodeIn_a_bits_user_amba_prot_privileged; // @[MixedNode.scala:551:17]
assign buffer_auto_in_a_bits_user_amba_prot_privileged = tlOtherMastersNodeOut_a_bits_user_amba_prot_privileged; // @[Buffer.scala:40:9]
wire tlOtherMastersNodeIn_a_bits_user_amba_prot_secure; // @[MixedNode.scala:551:17]
assign buffer_auto_in_a_bits_user_amba_prot_secure = tlOtherMastersNodeOut_a_bits_user_amba_prot_secure; // @[Buffer.scala:40:9]
wire tlOtherMastersNodeIn_a_bits_user_amba_prot_fetch; // @[MixedNode.scala:551:17]
assign buffer_auto_in_a_bits_user_amba_prot_fetch = tlOtherMastersNodeOut_a_bits_user_amba_prot_fetch; // @[Buffer.scala:40:9]
wire [7:0] tlOtherMastersNodeIn_a_bits_mask; // @[MixedNode.scala:551:17]
assign buffer_auto_in_a_bits_mask = tlOtherMastersNodeOut_a_bits_mask; // @[Buffer.scala:40:9]
wire [63:0] tlOtherMastersNodeIn_a_bits_data; // @[MixedNode.scala:551:17]
assign buffer_auto_in_a_bits_data = tlOtherMastersNodeOut_a_bits_data; // @[Buffer.scala:40:9]
wire tlOtherMastersNodeIn_d_ready; // @[MixedNode.scala:551:17]
assign buffer_auto_in_d_ready = tlOtherMastersNodeOut_d_ready; // @[Buffer.scala:40:9]
wire tlOtherMastersNodeIn_d_valid = tlOtherMastersNodeOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
wire [2:0] tlOtherMastersNodeIn_d_bits_opcode = tlOtherMastersNodeOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
wire [1:0] tlOtherMastersNodeIn_d_bits_param = tlOtherMastersNodeOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
wire [3:0] tlOtherMastersNodeIn_d_bits_size = tlOtherMastersNodeOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
wire tlOtherMastersNodeIn_d_bits_source = tlOtherMastersNodeOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
wire tlOtherMastersNodeIn_d_bits_sink = tlOtherMastersNodeOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
wire tlOtherMastersNodeIn_d_bits_denied = tlOtherMastersNodeOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
wire [63:0] tlOtherMastersNodeIn_d_bits_data = tlOtherMastersNodeOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
wire tlOtherMastersNodeIn_d_bits_corrupt = tlOtherMastersNodeOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign tlOtherMastersNodeOut_a_valid = tlOtherMastersNodeIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign tlOtherMastersNodeOut_a_bits_opcode = tlOtherMastersNodeIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign tlOtherMastersNodeOut_a_bits_param = tlOtherMastersNodeIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign tlOtherMastersNodeOut_a_bits_size = tlOtherMastersNodeIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign tlOtherMastersNodeOut_a_bits_source = tlOtherMastersNodeIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign tlOtherMastersNodeOut_a_bits_address = tlOtherMastersNodeIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign tlOtherMastersNodeOut_a_bits_user_amba_prot_bufferable = tlOtherMastersNodeIn_a_bits_user_amba_prot_bufferable; // @[MixedNode.scala:542:17, :551:17]
assign tlOtherMastersNodeOut_a_bits_user_amba_prot_modifiable = tlOtherMastersNodeIn_a_bits_user_amba_prot_modifiable; // @[MixedNode.scala:542:17, :551:17]
assign tlOtherMastersNodeOut_a_bits_user_amba_prot_readalloc = tlOtherMastersNodeIn_a_bits_user_amba_prot_readalloc; // @[MixedNode.scala:542:17, :551:17]
assign tlOtherMastersNodeOut_a_bits_user_amba_prot_writealloc = tlOtherMastersNodeIn_a_bits_user_amba_prot_writealloc; // @[MixedNode.scala:542:17, :551:17]
assign tlOtherMastersNodeOut_a_bits_user_amba_prot_privileged = tlOtherMastersNodeIn_a_bits_user_amba_prot_privileged; // @[MixedNode.scala:542:17, :551:17]
assign tlOtherMastersNodeOut_a_bits_user_amba_prot_secure = tlOtherMastersNodeIn_a_bits_user_amba_prot_secure; // @[MixedNode.scala:542:17, :551:17]
assign tlOtherMastersNodeOut_a_bits_user_amba_prot_fetch = tlOtherMastersNodeIn_a_bits_user_amba_prot_fetch; // @[MixedNode.scala:542:17, :551:17]
assign tlOtherMastersNodeOut_a_bits_mask = tlOtherMastersNodeIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign tlOtherMastersNodeOut_a_bits_data = tlOtherMastersNodeIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign tlOtherMastersNodeOut_d_ready = tlOtherMastersNodeIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
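  // Bundle-bridge plumbing: the incoming hart id feeds the hart-id broadcast
  // node, the trace source drives the tile's auto_trace_source_out_* port,
  // and the breakpoint-watch source feeds its broadcast node.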
assign broadcast_auto_in = hartidOut; // @[MixedNode.scala:542:17]
assign hartidOut = hartidIn; // @[MixedNode.scala:542:17, :551:17]
assign auto_trace_source_out_insns_0_valid_0 = traceSourceNodeOut_insns_0_valid; // @[RocketTile.scala:141:7]
assign auto_trace_source_out_insns_0_iaddr_0 = traceSourceNodeOut_insns_0_iaddr; // @[RocketTile.scala:141:7]
assign auto_trace_source_out_insns_0_insn_0 = traceSourceNodeOut_insns_0_insn; // @[RocketTile.scala:141:7]
assign auto_trace_source_out_insns_0_priv_0 = traceSourceNodeOut_insns_0_priv; // @[RocketTile.scala:141:7]
assign auto_trace_source_out_insns_0_exception_0 = traceSourceNodeOut_insns_0_exception; // @[RocketTile.scala:141:7]
assign auto_trace_source_out_insns_0_interrupt_0 = traceSourceNodeOut_insns_0_interrupt; // @[RocketTile.scala:141:7]
assign auto_trace_source_out_insns_0_cause_0 = traceSourceNodeOut_insns_0_cause; // @[RocketTile.scala:141:7]
assign auto_trace_source_out_insns_0_tval_0 = traceSourceNodeOut_insns_0_tval; // @[RocketTile.scala:141:7]
assign auto_trace_source_out_time_0 = traceSourceNodeOut_time; // @[RocketTile.scala:141:7]
assign broadcast_2_auto_in_0_valid_0 = bpwatchSourceNodeOut_0_valid_0; // @[MixedNode.scala:542:17]
assign broadcast_2_auto_in_0_action = bpwatchSourceNodeOut_0_action; // @[MixedNode.scala:542:17]
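  // Local interrupt edges are passed through unchanged (int_localIn ->
  // int_localOut) before being concentrated by intXbar below into
  // intSinkNodeIn_0..3.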
wire int_localOut_0; // @[MixedNode.scala:542:17]
wire x1_int_localOut_0; // @[MixedNode.scala:542:17]
wire x1_int_localOut_1; // @[MixedNode.scala:542:17]
wire x1_int_localOut_1_0; // @[MixedNode.scala:542:17]
assign int_localOut_0 = int_localIn_0; // @[MixedNode.scala:542:17, :551:17]
assign x1_int_localOut_0 = x1_int_localIn_0; // @[MixedNode.scala:542:17, :551:17]
assign x1_int_localOut_1 = x1_int_localIn_1; // @[MixedNode.scala:542:17, :551:17]
assign x1_int_localOut_1_0 = x1_int_localIn_1_0; // @[MixedNode.scala:542:17, :551:17]
wire intSinkNodeIn_0; // @[MixedNode.scala:551:17]
wire intSinkNodeIn_1; // @[MixedNode.scala:551:17]
wire intSinkNodeIn_2; // @[MixedNode.scala:551:17]
wire intSinkNodeIn_3; // @[MixedNode.scala:551:17]
assign auto_wfi_out_0_0 = wfiNodeOut_0; // @[RocketTile.scala:141:7]
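  // slaveNode wiring: requests arriving on the tile's slave port (via
  // buffer_1) are forwarded into tlSlaveXbar, and its D-channel responses are
  // returned along the same path.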
assign slaveNodeIn_a_ready = slaveNodeOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign tlSlaveXbar_auto_anon_in_a_valid = slaveNodeOut_a_valid; // @[Xbar.scala:74:9]
assign tlSlaveXbar_auto_anon_in_a_bits_opcode = slaveNodeOut_a_bits_opcode; // @[Xbar.scala:74:9]
assign tlSlaveXbar_auto_anon_in_a_bits_param = slaveNodeOut_a_bits_param; // @[Xbar.scala:74:9]
assign tlSlaveXbar_auto_anon_in_a_bits_size = slaveNodeOut_a_bits_size; // @[Xbar.scala:74:9]
assign tlSlaveXbar_auto_anon_in_a_bits_source = slaveNodeOut_a_bits_source; // @[Xbar.scala:74:9]
assign tlSlaveXbar_auto_anon_in_a_bits_address = slaveNodeOut_a_bits_address; // @[Xbar.scala:74:9]
assign tlSlaveXbar_auto_anon_in_a_bits_mask = slaveNodeOut_a_bits_mask; // @[Xbar.scala:74:9]
assign tlSlaveXbar_auto_anon_in_a_bits_data = slaveNodeOut_a_bits_data; // @[Xbar.scala:74:9]
assign tlSlaveXbar_auto_anon_in_a_bits_corrupt = slaveNodeOut_a_bits_corrupt; // @[Xbar.scala:74:9]
assign tlSlaveXbar_auto_anon_in_d_ready = slaveNodeOut_d_ready; // @[Xbar.scala:74:9]
assign slaveNodeIn_d_valid = slaveNodeOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign slaveNodeIn_d_bits_opcode = slaveNodeOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign slaveNodeIn_d_bits_size = slaveNodeOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign slaveNodeIn_d_bits_source = slaveNodeOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign slaveNodeIn_d_bits_data = slaveNodeOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign buffer_1_auto_out_a_ready = slaveNodeIn_a_ready; // @[Buffer.scala:40:9]
assign slaveNodeOut_a_valid = slaveNodeIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign slaveNodeOut_a_bits_opcode = slaveNodeIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign slaveNodeOut_a_bits_param = slaveNodeIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign slaveNodeOut_a_bits_size = slaveNodeIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign slaveNodeOut_a_bits_source = slaveNodeIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign slaveNodeOut_a_bits_address = slaveNodeIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign slaveNodeOut_a_bits_mask = slaveNodeIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign slaveNodeOut_a_bits_data = slaveNodeIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign slaveNodeOut_a_bits_corrupt = slaveNodeIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign slaveNodeOut_d_ready = slaveNodeIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign buffer_1_auto_out_d_valid = slaveNodeIn_d_valid; // @[Buffer.scala:40:9]
assign buffer_1_auto_out_d_bits_opcode = slaveNodeIn_d_bits_opcode; // @[Buffer.scala:40:9]
assign buffer_1_auto_out_d_bits_size = slaveNodeIn_d_bits_size; // @[Buffer.scala:40:9]
assign buffer_1_auto_out_d_bits_source = slaveNodeIn_d_bits_source; // @[Buffer.scala:40:9]
assign buffer_1_auto_out_d_bits_data = slaveNodeIn_d_bits_data; // @[Buffer.scala:40:9]
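  // The core's WFI indication is registered (reset to 0) before being driven
  // onto auto_wfi_out_0, giving it a one-cycle delay.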
reg wfiNodeOut_0_REG; // @[Interrupts.scala:131:36]
assign wfiNodeOut_0 = wfiNodeOut_0_REG; // @[Interrupts.scala:131:36]
always @(posedge clock) begin // @[RocketTile.scala:141:7]
if (reset) // @[RocketTile.scala:141:7]
wfiNodeOut_0_REG <= 1'h0; // @[Interrupts.scala:131:36]
else // @[RocketTile.scala:141:7]
wfiNodeOut_0_REG <= _core_io_wfi; // @[RocketTile.scala:147:20]
  end // always @(posedge)
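  // tlMasterXbar: 2-in/1-out TileLink crossbar that merges the two
  // width-widget masters (D-cache path on port 0, the read-only master on
  // port 1) onto tlOtherMastersNode.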
TLXbar_MasterXbar_RocketTile_i2_o1_a32d64s1k1z4u tlMasterXbar ( // @[HierarchicalElement.scala:55:42]
.clock (clock),
.reset (reset),
.auto_anon_in_1_a_ready (widget_1_auto_anon_out_a_ready),
.auto_anon_in_1_a_valid (widget_1_auto_anon_out_a_valid), // @[WidthWidget.scala:27:9]
.auto_anon_in_1_a_bits_address (widget_1_auto_anon_out_a_bits_address), // @[WidthWidget.scala:27:9]
.auto_anon_in_1_a_bits_user_amba_prot_readalloc (widget_1_auto_anon_out_a_bits_user_amba_prot_readalloc), // @[WidthWidget.scala:27:9]
.auto_anon_in_1_a_bits_user_amba_prot_writealloc (widget_1_auto_anon_out_a_bits_user_amba_prot_writealloc), // @[WidthWidget.scala:27:9]
.auto_anon_in_1_d_valid (widget_1_auto_anon_out_d_valid),
.auto_anon_in_1_d_bits_opcode (widget_1_auto_anon_out_d_bits_opcode),
.auto_anon_in_1_d_bits_param (widget_1_auto_anon_out_d_bits_param),
.auto_anon_in_1_d_bits_size (widget_1_auto_anon_out_d_bits_size),
.auto_anon_in_1_d_bits_sink (widget_1_auto_anon_out_d_bits_sink),
.auto_anon_in_1_d_bits_denied (widget_1_auto_anon_out_d_bits_denied),
.auto_anon_in_1_d_bits_data (widget_1_auto_anon_out_d_bits_data),
.auto_anon_in_1_d_bits_corrupt (widget_1_auto_anon_out_d_bits_corrupt),
.auto_anon_in_0_a_ready (widget_auto_anon_out_a_ready),
.auto_anon_in_0_a_valid (widget_auto_anon_out_a_valid), // @[WidthWidget.scala:27:9]
.auto_anon_in_0_a_bits_opcode (widget_auto_anon_out_a_bits_opcode), // @[WidthWidget.scala:27:9]
.auto_anon_in_0_a_bits_param (widget_auto_anon_out_a_bits_param), // @[WidthWidget.scala:27:9]
.auto_anon_in_0_a_bits_size (widget_auto_anon_out_a_bits_size), // @[WidthWidget.scala:27:9]
.auto_anon_in_0_a_bits_address (widget_auto_anon_out_a_bits_address), // @[WidthWidget.scala:27:9]
.auto_anon_in_0_a_bits_user_amba_prot_bufferable (widget_auto_anon_out_a_bits_user_amba_prot_bufferable), // @[WidthWidget.scala:27:9]
.auto_anon_in_0_a_bits_user_amba_prot_modifiable (widget_auto_anon_out_a_bits_user_amba_prot_modifiable), // @[WidthWidget.scala:27:9]
.auto_anon_in_0_a_bits_user_amba_prot_readalloc (widget_auto_anon_out_a_bits_user_amba_prot_readalloc), // @[WidthWidget.scala:27:9]
.auto_anon_in_0_a_bits_user_amba_prot_writealloc (widget_auto_anon_out_a_bits_user_amba_prot_writealloc), // @[WidthWidget.scala:27:9]
.auto_anon_in_0_a_bits_user_amba_prot_privileged (widget_auto_anon_out_a_bits_user_amba_prot_privileged), // @[WidthWidget.scala:27:9]
.auto_anon_in_0_a_bits_mask (widget_auto_anon_out_a_bits_mask), // @[WidthWidget.scala:27:9]
.auto_anon_in_0_a_bits_data (widget_auto_anon_out_a_bits_data), // @[WidthWidget.scala:27:9]
.auto_anon_in_0_d_ready (widget_auto_anon_out_d_ready), // @[WidthWidget.scala:27:9]
.auto_anon_in_0_d_valid (widget_auto_anon_out_d_valid),
.auto_anon_in_0_d_bits_opcode (widget_auto_anon_out_d_bits_opcode),
.auto_anon_in_0_d_bits_param (widget_auto_anon_out_d_bits_param),
.auto_anon_in_0_d_bits_size (widget_auto_anon_out_d_bits_size),
.auto_anon_in_0_d_bits_sink (widget_auto_anon_out_d_bits_sink),
.auto_anon_in_0_d_bits_denied (widget_auto_anon_out_d_bits_denied),
.auto_anon_in_0_d_bits_data (widget_auto_anon_out_d_bits_data),
.auto_anon_in_0_d_bits_corrupt (widget_auto_anon_out_d_bits_corrupt),
.auto_anon_out_a_ready (tlOtherMastersNodeIn_a_ready), // @[MixedNode.scala:551:17]
.auto_anon_out_a_valid (tlOtherMastersNodeIn_a_valid),
.auto_anon_out_a_bits_opcode (tlOtherMastersNodeIn_a_bits_opcode),
.auto_anon_out_a_bits_param (tlOtherMastersNodeIn_a_bits_param),
.auto_anon_out_a_bits_size (tlOtherMastersNodeIn_a_bits_size),
.auto_anon_out_a_bits_source (tlOtherMastersNodeIn_a_bits_source),
.auto_anon_out_a_bits_address (tlOtherMastersNodeIn_a_bits_address),
.auto_anon_out_a_bits_user_amba_prot_bufferable (tlOtherMastersNodeIn_a_bits_user_amba_prot_bufferable),
.auto_anon_out_a_bits_user_amba_prot_modifiable (tlOtherMastersNodeIn_a_bits_user_amba_prot_modifiable),
.auto_anon_out_a_bits_user_amba_prot_readalloc (tlOtherMastersNodeIn_a_bits_user_amba_prot_readalloc),
.auto_anon_out_a_bits_user_amba_prot_writealloc (tlOtherMastersNodeIn_a_bits_user_amba_prot_writealloc),
.auto_anon_out_a_bits_user_amba_prot_privileged (tlOtherMastersNodeIn_a_bits_user_amba_prot_privileged),
.auto_anon_out_a_bits_user_amba_prot_secure (tlOtherMastersNodeIn_a_bits_user_amba_prot_secure),
.auto_anon_out_a_bits_user_amba_prot_fetch (tlOtherMastersNodeIn_a_bits_user_amba_prot_fetch),
.auto_anon_out_a_bits_mask (tlOtherMastersNodeIn_a_bits_mask),
.auto_anon_out_a_bits_data (tlOtherMastersNodeIn_a_bits_data),
.auto_anon_out_d_ready (tlOtherMastersNodeIn_d_ready),
.auto_anon_out_d_valid (tlOtherMastersNodeIn_d_valid), // @[MixedNode.scala:551:17]
.auto_anon_out_d_bits_opcode (tlOtherMastersNodeIn_d_bits_opcode), // @[MixedNode.scala:551:17]
.auto_anon_out_d_bits_param (tlOtherMastersNodeIn_d_bits_param), // @[MixedNode.scala:551:17]
.auto_anon_out_d_bits_size (tlOtherMastersNodeIn_d_bits_size), // @[MixedNode.scala:551:17]
.auto_anon_out_d_bits_source (tlOtherMastersNodeIn_d_bits_source), // @[MixedNode.scala:551:17]
.auto_anon_out_d_bits_sink (tlOtherMastersNodeIn_d_bits_sink), // @[MixedNode.scala:551:17]
.auto_anon_out_d_bits_denied (tlOtherMastersNodeIn_d_bits_denied), // @[MixedNode.scala:551:17]
.auto_anon_out_d_bits_data (tlOtherMastersNodeIn_d_bits_data), // @[MixedNode.scala:551:17]
.auto_anon_out_d_bits_corrupt (tlOtherMastersNodeIn_d_bits_corrupt) // @[MixedNode.scala:551:17]
); // @[HierarchicalElement.scala:55:42]
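  // intXbar: interrupt crossbar that concentrates the tile's local interrupt
  // inputs into intSinkNodeIn_0..3, presumably consumed by the core's
  // interrupt/CSR logic instantiated further below.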
IntXbar_i3_o1 intXbar ( // @[HierarchicalElement.scala:57:37]
.auto_anon_in_2_0 (x1_int_localOut_1_0), // @[MixedNode.scala:542:17]
.auto_anon_in_1_0 (x1_int_localOut_0), // @[MixedNode.scala:542:17]
.auto_anon_in_1_1 (x1_int_localOut_1), // @[MixedNode.scala:542:17]
.auto_anon_in_0_0 (int_localOut_0), // @[MixedNode.scala:542:17]
.auto_anon_out_0 (intSinkNodeIn_0),
.auto_anon_out_1 (intSinkNodeIn_1),
.auto_anon_out_2 (intSinkNodeIn_2),
.auto_anon_out_3 (intSinkNodeIn_3)
); // @[HierarchicalElement.scala:57:37]
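  // DCache: the tile's data cache. Its TileLink master side drives the width
  // widget above; its CPU side is served by the HellaCache arbiter
  // (_dcacheArb_io_mem_*) and its page-table walks go through the shared PTW
  // (_ptw_io_requestor_0_*).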
DCache dcache ( // @[HellaCache.scala:278:43]
.clock (clock),
.reset (reset),
.auto_hart_id_sink_in (broadcast_auto_out_1), // @[BundleBridgeNexus.scala:20:9]
.auto_out_a_ready (widget_auto_anon_in_a_ready), // @[WidthWidget.scala:27:9]
.auto_out_a_valid (widget_auto_anon_in_a_valid),
.auto_out_a_bits_opcode (widget_auto_anon_in_a_bits_opcode),
.auto_out_a_bits_param (widget_auto_anon_in_a_bits_param),
.auto_out_a_bits_size (widget_auto_anon_in_a_bits_size),
.auto_out_a_bits_address (widget_auto_anon_in_a_bits_address),
.auto_out_a_bits_user_amba_prot_bufferable (widget_auto_anon_in_a_bits_user_amba_prot_bufferable),
.auto_out_a_bits_user_amba_prot_modifiable (widget_auto_anon_in_a_bits_user_amba_prot_modifiable),
.auto_out_a_bits_user_amba_prot_readalloc (widget_auto_anon_in_a_bits_user_amba_prot_readalloc),
.auto_out_a_bits_user_amba_prot_writealloc (widget_auto_anon_in_a_bits_user_amba_prot_writealloc),
.auto_out_a_bits_user_amba_prot_privileged (widget_auto_anon_in_a_bits_user_amba_prot_privileged),
.auto_out_a_bits_mask (widget_auto_anon_in_a_bits_mask),
.auto_out_a_bits_data (widget_auto_anon_in_a_bits_data),
.auto_out_d_ready (widget_auto_anon_in_d_ready),
.auto_out_d_valid (widget_auto_anon_in_d_valid), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_opcode (widget_auto_anon_in_d_bits_opcode), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_param (widget_auto_anon_in_d_bits_param), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_size (widget_auto_anon_in_d_bits_size), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_sink (widget_auto_anon_in_d_bits_sink), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_denied (widget_auto_anon_in_d_bits_denied), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_data (widget_auto_anon_in_d_bits_data), // @[WidthWidget.scala:27:9]
.auto_out_d_bits_corrupt (widget_auto_anon_in_d_bits_corrupt), // @[WidthWidget.scala:27:9]
.io_cpu_req_ready (_dcache_io_cpu_req_ready),
.io_cpu_req_valid (_dcacheArb_io_mem_req_valid), // @[HellaCache.scala:292:25]
.io_cpu_req_bits_addr (_dcacheArb_io_mem_req_bits_addr), // @[HellaCache.scala:292:25]
.io_cpu_req_bits_tag (_dcacheArb_io_mem_req_bits_tag), // @[HellaCache.scala:292:25]
.io_cpu_req_bits_cmd (_dcacheArb_io_mem_req_bits_cmd), // @[HellaCache.scala:292:25]
.io_cpu_req_bits_size (_dcacheArb_io_mem_req_bits_size), // @[HellaCache.scala:292:25]
.io_cpu_req_bits_signed (_dcacheArb_io_mem_req_bits_signed), // @[HellaCache.scala:292:25]
.io_cpu_req_bits_dprv (_dcacheArb_io_mem_req_bits_dprv), // @[HellaCache.scala:292:25]
.io_cpu_req_bits_dv (_dcacheArb_io_mem_req_bits_dv), // @[HellaCache.scala:292:25]
.io_cpu_req_bits_phys (_dcacheArb_io_mem_req_bits_phys), // @[HellaCache.scala:292:25]
.io_cpu_req_bits_no_resp (_dcacheArb_io_mem_req_bits_no_resp), // @[HellaCache.scala:292:25]
.io_cpu_req_bits_no_xcpt (_dcacheArb_io_mem_req_bits_no_xcpt), // @[HellaCache.scala:292:25]
.io_cpu_s1_kill (_dcacheArb_io_mem_s1_kill), // @[HellaCache.scala:292:25]
.io_cpu_s1_data_data (_dcacheArb_io_mem_s1_data_data), // @[HellaCache.scala:292:25]
.io_cpu_s1_data_mask (_dcacheArb_io_mem_s1_data_mask), // @[HellaCache.scala:292:25]
.io_cpu_s2_nack (_dcache_io_cpu_s2_nack),
.io_cpu_s2_nack_cause_raw (_dcache_io_cpu_s2_nack_cause_raw),
.io_cpu_s2_uncached (_dcache_io_cpu_s2_uncached),
.io_cpu_s2_paddr (_dcache_io_cpu_s2_paddr),
.io_cpu_resp_valid (_dcache_io_cpu_resp_valid),
.io_cpu_resp_bits_addr (_dcache_io_cpu_resp_bits_addr),
.io_cpu_resp_bits_tag (_dcache_io_cpu_resp_bits_tag),
.io_cpu_resp_bits_cmd (_dcache_io_cpu_resp_bits_cmd),
.io_cpu_resp_bits_size (_dcache_io_cpu_resp_bits_size),
.io_cpu_resp_bits_signed (_dcache_io_cpu_resp_bits_signed),
.io_cpu_resp_bits_dprv (_dcache_io_cpu_resp_bits_dprv),
.io_cpu_resp_bits_dv (_dcache_io_cpu_resp_bits_dv),
.io_cpu_resp_bits_data (_dcache_io_cpu_resp_bits_data),
.io_cpu_resp_bits_mask (_dcache_io_cpu_resp_bits_mask),
.io_cpu_resp_bits_replay (_dcache_io_cpu_resp_bits_replay),
.io_cpu_resp_bits_has_data (_dcache_io_cpu_resp_bits_has_data),
.io_cpu_resp_bits_data_word_bypass (_dcache_io_cpu_resp_bits_data_word_bypass),
.io_cpu_resp_bits_data_raw (_dcache_io_cpu_resp_bits_data_raw),
.io_cpu_resp_bits_store_data (_dcache_io_cpu_resp_bits_store_data),
.io_cpu_replay_next (_dcache_io_cpu_replay_next),
.io_cpu_s2_xcpt_ma_ld (_dcache_io_cpu_s2_xcpt_ma_ld),
.io_cpu_s2_xcpt_ma_st (_dcache_io_cpu_s2_xcpt_ma_st),
.io_cpu_s2_xcpt_pf_ld (_dcache_io_cpu_s2_xcpt_pf_ld),
.io_cpu_s2_xcpt_pf_st (_dcache_io_cpu_s2_xcpt_pf_st),
.io_cpu_s2_xcpt_ae_ld (_dcache_io_cpu_s2_xcpt_ae_ld),
.io_cpu_s2_xcpt_ae_st (_dcache_io_cpu_s2_xcpt_ae_st),
.io_cpu_s2_gpa (_dcache_io_cpu_s2_gpa),
.io_cpu_ordered (_dcache_io_cpu_ordered),
.io_cpu_store_pending (_dcache_io_cpu_store_pending),
.io_cpu_perf_acquire (_dcache_io_cpu_perf_acquire),
.io_cpu_perf_grant (_dcache_io_cpu_perf_grant),
.io_cpu_perf_blocked (_dcache_io_cpu_perf_blocked),
.io_cpu_perf_canAcceptStoreThenLoad (_dcache_io_cpu_perf_canAcceptStoreThenLoad),
.io_cpu_perf_canAcceptStoreThenRMW (_dcache_io_cpu_perf_canAcceptStoreThenRMW),
.io_cpu_perf_canAcceptLoadThenLoad (_dcache_io_cpu_perf_canAcceptLoadThenLoad),
.io_cpu_perf_storeBufferEmptyAfterLoad (_dcache_io_cpu_perf_storeBufferEmptyAfterLoad),
.io_cpu_perf_storeBufferEmptyAfterStore (_dcache_io_cpu_perf_storeBufferEmptyAfterStore),
.io_cpu_keep_clock_enabled (_dcacheArb_io_mem_keep_clock_enabled), // @[HellaCache.scala:292:25]
.io_ptw_req_ready (_ptw_io_requestor_0_req_ready), // @[PTW.scala:802:19]
.io_ptw_req_bits_bits_addr (_dcache_io_ptw_req_bits_bits_addr),
.io_ptw_req_bits_bits_need_gpa (_dcache_io_ptw_req_bits_bits_need_gpa),
.io_ptw_req_bits_bits_vstage1 (_dcache_io_ptw_req_bits_bits_vstage1),
.io_ptw_req_bits_bits_stage2 (_dcache_io_ptw_req_bits_bits_stage2),
.io_ptw_resp_valid (_ptw_io_requestor_0_resp_valid), // @[PTW.scala:802:19]
.io_ptw_resp_bits_ae_ptw (_ptw_io_requestor_0_resp_bits_ae_ptw), // @[PTW.scala:802:19]
.io_ptw_resp_bits_ae_final (_ptw_io_requestor_0_resp_bits_ae_final), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pf (_ptw_io_requestor_0_resp_bits_pf), // @[PTW.scala:802:19]
.io_ptw_resp_bits_gf (_ptw_io_requestor_0_resp_bits_gf), // @[PTW.scala:802:19]
.io_ptw_resp_bits_hr (_ptw_io_requestor_0_resp_bits_hr), // @[PTW.scala:802:19]
.io_ptw_resp_bits_hw (_ptw_io_requestor_0_resp_bits_hw), // @[PTW.scala:802:19]
.io_ptw_resp_bits_hx (_ptw_io_requestor_0_resp_bits_hx), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pte_reserved_for_future (_ptw_io_requestor_0_resp_bits_pte_reserved_for_future), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pte_ppn (_ptw_io_requestor_0_resp_bits_pte_ppn), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pte_reserved_for_software (_ptw_io_requestor_0_resp_bits_pte_reserved_for_software), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pte_d (_ptw_io_requestor_0_resp_bits_pte_d), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pte_a (_ptw_io_requestor_0_resp_bits_pte_a), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pte_g (_ptw_io_requestor_0_resp_bits_pte_g), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pte_u (_ptw_io_requestor_0_resp_bits_pte_u), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pte_x (_ptw_io_requestor_0_resp_bits_pte_x), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pte_w (_ptw_io_requestor_0_resp_bits_pte_w), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pte_r (_ptw_io_requestor_0_resp_bits_pte_r), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pte_v (_ptw_io_requestor_0_resp_bits_pte_v), // @[PTW.scala:802:19]
.io_ptw_resp_bits_level (_ptw_io_requestor_0_resp_bits_level), // @[PTW.scala:802:19]
.io_ptw_resp_bits_homogeneous (_ptw_io_requestor_0_resp_bits_homogeneous), // @[PTW.scala:802:19]
.io_ptw_resp_bits_gpa_valid (_ptw_io_requestor_0_resp_bits_gpa_valid), // @[PTW.scala:802:19]
.io_ptw_resp_bits_gpa_bits (_ptw_io_requestor_0_resp_bits_gpa_bits), // @[PTW.scala:802:19]
.io_ptw_resp_bits_gpa_is_pte (_ptw_io_requestor_0_resp_bits_gpa_is_pte), // @[PTW.scala:802:19]
.io_ptw_status_debug (_ptw_io_requestor_0_status_debug), // @[PTW.scala:802:19]
.io_ptw_status_cease (_ptw_io_requestor_0_status_cease), // @[PTW.scala:802:19]
.io_ptw_status_wfi (_ptw_io_requestor_0_status_wfi), // @[PTW.scala:802:19]
.io_ptw_status_isa (_ptw_io_requestor_0_status_isa), // @[PTW.scala:802:19]
.io_ptw_status_dv (_ptw_io_requestor_0_status_dv), // @[PTW.scala:802:19]
.io_ptw_status_v (_ptw_io_requestor_0_status_v), // @[PTW.scala:802:19]
.io_ptw_status_sd (_ptw_io_requestor_0_status_sd), // @[PTW.scala:802:19]
.io_ptw_status_mpv (_ptw_io_requestor_0_status_mpv), // @[PTW.scala:802:19]
.io_ptw_status_gva (_ptw_io_requestor_0_status_gva), // @[PTW.scala:802:19]
.io_ptw_status_fs (_ptw_io_requestor_0_status_fs), // @[PTW.scala:802:19]
.io_ptw_status_mpp (_ptw_io_requestor_0_status_mpp), // @[PTW.scala:802:19]
.io_ptw_status_mpie (_ptw_io_requestor_0_status_mpie), // @[PTW.scala:802:19]
.io_ptw_status_mie (_ptw_io_requestor_0_status_mie), // @[PTW.scala:802:19]
.io_ptw_gstatus_debug (_ptw_io_requestor_0_gstatus_debug), // @[PTW.scala:802:19]
.io_ptw_gstatus_cease (_ptw_io_requestor_0_gstatus_cease), // @[PTW.scala:802:19]
.io_ptw_gstatus_wfi (_ptw_io_requestor_0_gstatus_wfi), // @[PTW.scala:802:19]
.io_ptw_gstatus_isa (_ptw_io_requestor_0_gstatus_isa), // @[PTW.scala:802:19]
.io_ptw_gstatus_dprv (_ptw_io_requestor_0_gstatus_dprv), // @[PTW.scala:802:19]
.io_ptw_gstatus_dv (_ptw_io_requestor_0_gstatus_dv), // @[PTW.scala:802:19]
.io_ptw_gstatus_prv (_ptw_io_requestor_0_gstatus_prv), // @[PTW.scala:802:19]
.io_ptw_gstatus_v (_ptw_io_requestor_0_gstatus_v), // @[PTW.scala:802:19]
.io_ptw_gstatus_sd (_ptw_io_requestor_0_gstatus_sd), // @[PTW.scala:802:19]
.io_ptw_gstatus_zero2 (_ptw_io_requestor_0_gstatus_zero2), // @[PTW.scala:802:19]
.io_ptw_gstatus_mpv (_ptw_io_requestor_0_gstatus_mpv), // @[PTW.scala:802:19]
.io_ptw_gstatus_gva (_ptw_io_requestor_0_gstatus_gva), // @[PTW.scala:802:19]
.io_ptw_gstatus_mbe (_ptw_io_requestor_0_gstatus_mbe), // @[PTW.scala:802:19]
.io_ptw_gstatus_sbe (_ptw_io_requestor_0_gstatus_sbe), // @[PTW.scala:802:19]
.io_ptw_gstatus_sxl (_ptw_io_requestor_0_gstatus_sxl), // @[PTW.scala:802:19]
.io_ptw_gstatus_zero1 (_ptw_io_requestor_0_gstatus_zero1), // @[PTW.scala:802:19]
.io_ptw_gstatus_tsr (_ptw_io_requestor_0_gstatus_tsr), // @[PTW.scala:802:19]
.io_ptw_gstatus_tw (_ptw_io_requestor_0_gstatus_tw), // @[PTW.scala:802:19]
.io_ptw_gstatus_tvm (_ptw_io_requestor_0_gstatus_tvm), // @[PTW.scala:802:19]
.io_ptw_gstatus_mxr (_ptw_io_requestor_0_gstatus_mxr), // @[PTW.scala:802:19]
.io_ptw_gstatus_sum (_ptw_io_requestor_0_gstatus_sum), // @[PTW.scala:802:19]
.io_ptw_gstatus_mprv (_ptw_io_requestor_0_gstatus_mprv), // @[PTW.scala:802:19]
.io_ptw_gstatus_fs (_ptw_io_requestor_0_gstatus_fs), // @[PTW.scala:802:19]
.io_ptw_gstatus_mpp (_ptw_io_requestor_0_gstatus_mpp), // @[PTW.scala:802:19]
.io_ptw_gstatus_vs (_ptw_io_requestor_0_gstatus_vs), // @[PTW.scala:802:19]
.io_ptw_gstatus_spp (_ptw_io_requestor_0_gstatus_spp), // @[PTW.scala:802:19]
.io_ptw_gstatus_mpie (_ptw_io_requestor_0_gstatus_mpie), // @[PTW.scala:802:19]
.io_ptw_gstatus_ube (_ptw_io_requestor_0_gstatus_ube), // @[PTW.scala:802:19]
.io_ptw_gstatus_spie (_ptw_io_requestor_0_gstatus_spie), // @[PTW.scala:802:19]
.io_ptw_gstatus_upie (_ptw_io_requestor_0_gstatus_upie), // @[PTW.scala:802:19]
.io_ptw_gstatus_mie (_ptw_io_requestor_0_gstatus_mie), // @[PTW.scala:802:19]
.io_ptw_gstatus_hie (_ptw_io_requestor_0_gstatus_hie), // @[PTW.scala:802:19]
.io_ptw_gstatus_sie (_ptw_io_requestor_0_gstatus_sie), // @[PTW.scala:802:19]
.io_ptw_gstatus_uie (_ptw_io_requestor_0_gstatus_uie), // @[PTW.scala:802:19]
.io_ptw_pmp_0_cfg_l (_ptw_io_requestor_0_pmp_0_cfg_l), // @[PTW.scala:802:19]
.io_ptw_pmp_0_cfg_a (_ptw_io_requestor_0_pmp_0_cfg_a), // @[PTW.scala:802:19]
.io_ptw_pmp_0_cfg_x (_ptw_io_requestor_0_pmp_0_cfg_x), // @[PTW.scala:802:19]
.io_ptw_pmp_0_cfg_w (_ptw_io_requestor_0_pmp_0_cfg_w), // @[PTW.scala:802:19]
.io_ptw_pmp_0_cfg_r (_ptw_io_requestor_0_pmp_0_cfg_r), // @[PTW.scala:802:19]
.io_ptw_pmp_0_addr (_ptw_io_requestor_0_pmp_0_addr), // @[PTW.scala:802:19]
.io_ptw_pmp_0_mask (_ptw_io_requestor_0_pmp_0_mask), // @[PTW.scala:802:19]
.io_ptw_pmp_1_cfg_l (_ptw_io_requestor_0_pmp_1_cfg_l), // @[PTW.scala:802:19]
.io_ptw_pmp_1_cfg_a (_ptw_io_requestor_0_pmp_1_cfg_a), // @[PTW.scala:802:19]
.io_ptw_pmp_1_cfg_x (_ptw_io_requestor_0_pmp_1_cfg_x), // @[PTW.scala:802:19]
.io_ptw_pmp_1_cfg_w (_ptw_io_requestor_0_pmp_1_cfg_w), // @[PTW.scala:802:19]
.io_ptw_pmp_1_cfg_r (_ptw_io_requestor_0_pmp_1_cfg_r), // @[PTW.scala:802:19]
.io_ptw_pmp_1_addr (_ptw_io_requestor_0_pmp_1_addr), // @[PTW.scala:802:19]
.io_ptw_pmp_1_mask (_ptw_io_requestor_0_pmp_1_mask), // @[PTW.scala:802:19]
.io_ptw_pmp_2_cfg_l (_ptw_io_requestor_0_pmp_2_cfg_l), // @[PTW.scala:802:19]
.io_ptw_pmp_2_cfg_a (_ptw_io_requestor_0_pmp_2_cfg_a), // @[PTW.scala:802:19]
.io_ptw_pmp_2_cfg_x (_ptw_io_requestor_0_pmp_2_cfg_x), // @[PTW.scala:802:19]
.io_ptw_pmp_2_cfg_w (_ptw_io_requestor_0_pmp_2_cfg_w), // @[PTW.scala:802:19]
.io_ptw_pmp_2_cfg_r (_ptw_io_requestor_0_pmp_2_cfg_r), // @[PTW.scala:802:19]
.io_ptw_pmp_2_addr (_ptw_io_requestor_0_pmp_2_addr), // @[PTW.scala:802:19]
.io_ptw_pmp_2_mask (_ptw_io_requestor_0_pmp_2_mask), // @[PTW.scala:802:19]
.io_ptw_pmp_3_cfg_l (_ptw_io_requestor_0_pmp_3_cfg_l), // @[PTW.scala:802:19]
.io_ptw_pmp_3_cfg_a (_ptw_io_requestor_0_pmp_3_cfg_a), // @[PTW.scala:802:19]
.io_ptw_pmp_3_cfg_x (_ptw_io_requestor_0_pmp_3_cfg_x), // @[PTW.scala:802:19]
.io_ptw_pmp_3_cfg_w (_ptw_io_requestor_0_pmp_3_cfg_w), // @[PTW.scala:802:19]
.io_ptw_pmp_3_cfg_r (_ptw_io_requestor_0_pmp_3_cfg_r), // @[PTW.scala:802:19]
.io_ptw_pmp_3_addr (_ptw_io_requestor_0_pmp_3_addr), // @[PTW.scala:802:19]
.io_ptw_pmp_3_mask (_ptw_io_requestor_0_pmp_3_mask), // @[PTW.scala:802:19]
.io_ptw_pmp_4_cfg_l (_ptw_io_requestor_0_pmp_4_cfg_l), // @[PTW.scala:802:19]
.io_ptw_pmp_4_cfg_a (_ptw_io_requestor_0_pmp_4_cfg_a), // @[PTW.scala:802:19]
.io_ptw_pmp_4_cfg_x (_ptw_io_requestor_0_pmp_4_cfg_x), // @[PTW.scala:802:19]
.io_ptw_pmp_4_cfg_w (_ptw_io_requestor_0_pmp_4_cfg_w), // @[PTW.scala:802:19]
.io_ptw_pmp_4_cfg_r (_ptw_io_requestor_0_pmp_4_cfg_r), // @[PTW.scala:802:19]
.io_ptw_pmp_4_addr (_ptw_io_requestor_0_pmp_4_addr), // @[PTW.scala:802:19]
.io_ptw_pmp_4_mask (_ptw_io_requestor_0_pmp_4_mask), // @[PTW.scala:802:19]
.io_ptw_pmp_5_cfg_l (_ptw_io_requestor_0_pmp_5_cfg_l), // @[PTW.scala:802:19]
.io_ptw_pmp_5_cfg_a (_ptw_io_requestor_0_pmp_5_cfg_a), // @[PTW.scala:802:19]
.io_ptw_pmp_5_cfg_x (_ptw_io_requestor_0_pmp_5_cfg_x), // @[PTW.scala:802:19]
.io_ptw_pmp_5_cfg_w (_ptw_io_requestor_0_pmp_5_cfg_w), // @[PTW.scala:802:19]
.io_ptw_pmp_5_cfg_r (_ptw_io_requestor_0_pmp_5_cfg_r), // @[PTW.scala:802:19]
.io_ptw_pmp_5_addr (_ptw_io_requestor_0_pmp_5_addr), // @[PTW.scala:802:19]
.io_ptw_pmp_5_mask (_ptw_io_requestor_0_pmp_5_mask), // @[PTW.scala:802:19]
.io_ptw_pmp_6_cfg_l (_ptw_io_requestor_0_pmp_6_cfg_l), // @[PTW.scala:802:19]
.io_ptw_pmp_6_cfg_a (_ptw_io_requestor_0_pmp_6_cfg_a), // @[PTW.scala:802:19]
.io_ptw_pmp_6_cfg_x (_ptw_io_requestor_0_pmp_6_cfg_x), // @[PTW.scala:802:19]
.io_ptw_pmp_6_cfg_w (_ptw_io_requestor_0_pmp_6_cfg_w), // @[PTW.scala:802:19]
.io_ptw_pmp_6_cfg_r (_ptw_io_requestor_0_pmp_6_cfg_r), // @[PTW.scala:802:19]
.io_ptw_pmp_6_addr (_ptw_io_requestor_0_pmp_6_addr), // @[PTW.scala:802:19]
.io_ptw_pmp_6_mask (_ptw_io_requestor_0_pmp_6_mask), // @[PTW.scala:802:19]
.io_ptw_pmp_7_cfg_l (_ptw_io_requestor_0_pmp_7_cfg_l), // @[PTW.scala:802:19]
.io_ptw_pmp_7_cfg_a (_ptw_io_requestor_0_pmp_7_cfg_a), // @[PTW.scala:802:19]
.io_ptw_pmp_7_cfg_x (_ptw_io_requestor_0_pmp_7_cfg_x), // @[PTW.scala:802:19]
.io_ptw_pmp_7_cfg_w (_ptw_io_requestor_0_pmp_7_cfg_w), // @[PTW.scala:802:19]
.io_ptw_pmp_7_cfg_r (_ptw_io_requestor_0_pmp_7_cfg_r), // @[PTW.scala:802:19]
.io_ptw_pmp_7_addr (_ptw_io_requestor_0_pmp_7_addr), // @[PTW.scala:802:19]
.io_ptw_pmp_7_mask (_ptw_io_requestor_0_pmp_7_mask), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_0_ren (_ptw_io_requestor_0_customCSRs_csrs_0_ren), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_0_wen (_ptw_io_requestor_0_customCSRs_csrs_0_wen), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_0_wdata (_ptw_io_requestor_0_customCSRs_csrs_0_wdata), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_0_value (_ptw_io_requestor_0_customCSRs_csrs_0_value), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_1_ren (_ptw_io_requestor_0_customCSRs_csrs_1_ren), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_1_wen (_ptw_io_requestor_0_customCSRs_csrs_1_wen), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_1_wdata (_ptw_io_requestor_0_customCSRs_csrs_1_wdata), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_1_value (_ptw_io_requestor_0_customCSRs_csrs_1_value), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_2_ren (_ptw_io_requestor_0_customCSRs_csrs_2_ren), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_2_wen (_ptw_io_requestor_0_customCSRs_csrs_2_wen), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_2_wdata (_ptw_io_requestor_0_customCSRs_csrs_2_wdata), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_2_value (_ptw_io_requestor_0_customCSRs_csrs_2_value), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_3_ren (_ptw_io_requestor_0_customCSRs_csrs_3_ren), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_3_wen (_ptw_io_requestor_0_customCSRs_csrs_3_wen), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_3_wdata (_ptw_io_requestor_0_customCSRs_csrs_3_wdata), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_3_value (_ptw_io_requestor_0_customCSRs_csrs_3_value) // @[PTW.scala:802:19]
); // @[HellaCache.scala:278:43]
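  // Descriptive note on the instance above: the HellaCache data cache takes its
  // CPU-side request/response stream from the HellaCacheArbiter (dcacheArb_io_mem_*),
  // issues page-table walks as PTW requestor 0 (ptw_io_requestor_0_*), and reaches
  // memory over the TileLink A/D channels routed through the width widget
  // (widget_auto_anon_*).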
Frontend frontend ( // @[Frontend.scala:393:28]
.clock (clock),
.reset (reset),
.auto_icache_master_out_a_ready (widget_1_auto_anon_in_a_ready), // @[WidthWidget.scala:27:9]
.auto_icache_master_out_a_valid (widget_1_auto_anon_in_a_valid),
.auto_icache_master_out_a_bits_address (widget_1_auto_anon_in_a_bits_address),
.auto_icache_master_out_a_bits_user_amba_prot_readalloc (widget_1_auto_anon_in_a_bits_user_amba_prot_readalloc),
.auto_icache_master_out_a_bits_user_amba_prot_writealloc (widget_1_auto_anon_in_a_bits_user_amba_prot_writealloc),
.auto_icache_master_out_d_valid (widget_1_auto_anon_in_d_valid), // @[WidthWidget.scala:27:9]
.auto_icache_master_out_d_bits_opcode (widget_1_auto_anon_in_d_bits_opcode), // @[WidthWidget.scala:27:9]
.auto_icache_master_out_d_bits_param (widget_1_auto_anon_in_d_bits_param), // @[WidthWidget.scala:27:9]
.auto_icache_master_out_d_bits_size (widget_1_auto_anon_in_d_bits_size), // @[WidthWidget.scala:27:9]
.auto_icache_master_out_d_bits_sink (widget_1_auto_anon_in_d_bits_sink), // @[WidthWidget.scala:27:9]
.auto_icache_master_out_d_bits_denied (widget_1_auto_anon_in_d_bits_denied), // @[WidthWidget.scala:27:9]
.auto_icache_master_out_d_bits_data (widget_1_auto_anon_in_d_bits_data), // @[WidthWidget.scala:27:9]
.auto_icache_master_out_d_bits_corrupt (widget_1_auto_anon_in_d_bits_corrupt), // @[WidthWidget.scala:27:9]
.io_cpu_might_request (_core_io_imem_might_request), // @[RocketTile.scala:147:20]
.io_cpu_req_valid (_core_io_imem_req_valid), // @[RocketTile.scala:147:20]
.io_cpu_req_bits_pc (_core_io_imem_req_bits_pc), // @[RocketTile.scala:147:20]
.io_cpu_req_bits_speculative (_core_io_imem_req_bits_speculative), // @[RocketTile.scala:147:20]
.io_cpu_sfence_valid (_core_io_imem_sfence_valid), // @[RocketTile.scala:147:20]
.io_cpu_sfence_bits_rs1 (_core_io_imem_sfence_bits_rs1), // @[RocketTile.scala:147:20]
.io_cpu_sfence_bits_rs2 (_core_io_imem_sfence_bits_rs2), // @[RocketTile.scala:147:20]
.io_cpu_sfence_bits_addr (_core_io_imem_sfence_bits_addr), // @[RocketTile.scala:147:20]
.io_cpu_sfence_bits_asid (_core_io_imem_sfence_bits_asid), // @[RocketTile.scala:147:20]
.io_cpu_sfence_bits_hv (_core_io_imem_sfence_bits_hv), // @[RocketTile.scala:147:20]
.io_cpu_sfence_bits_hg (_core_io_imem_sfence_bits_hg), // @[RocketTile.scala:147:20]
.io_cpu_resp_ready (_core_io_imem_resp_ready), // @[RocketTile.scala:147:20]
.io_cpu_resp_valid (_frontend_io_cpu_resp_valid),
.io_cpu_resp_bits_btb_cfiType (_frontend_io_cpu_resp_bits_btb_cfiType),
.io_cpu_resp_bits_btb_taken (_frontend_io_cpu_resp_bits_btb_taken),
.io_cpu_resp_bits_btb_mask (_frontend_io_cpu_resp_bits_btb_mask),
.io_cpu_resp_bits_btb_bridx (_frontend_io_cpu_resp_bits_btb_bridx),
.io_cpu_resp_bits_btb_target (_frontend_io_cpu_resp_bits_btb_target),
.io_cpu_resp_bits_btb_entry (_frontend_io_cpu_resp_bits_btb_entry),
.io_cpu_resp_bits_btb_bht_history (_frontend_io_cpu_resp_bits_btb_bht_history),
.io_cpu_resp_bits_btb_bht_value (_frontend_io_cpu_resp_bits_btb_bht_value),
.io_cpu_resp_bits_pc (_frontend_io_cpu_resp_bits_pc),
.io_cpu_resp_bits_data (_frontend_io_cpu_resp_bits_data),
.io_cpu_resp_bits_mask (_frontend_io_cpu_resp_bits_mask),
.io_cpu_resp_bits_xcpt_pf_inst (_frontend_io_cpu_resp_bits_xcpt_pf_inst),
.io_cpu_resp_bits_xcpt_gf_inst (_frontend_io_cpu_resp_bits_xcpt_gf_inst),
.io_cpu_resp_bits_xcpt_ae_inst (_frontend_io_cpu_resp_bits_xcpt_ae_inst),
.io_cpu_resp_bits_replay (_frontend_io_cpu_resp_bits_replay),
.io_cpu_gpa_valid (_frontend_io_cpu_gpa_valid),
.io_cpu_gpa_bits (_frontend_io_cpu_gpa_bits),
.io_cpu_gpa_is_pte (_frontend_io_cpu_gpa_is_pte),
.io_cpu_btb_update_valid (_core_io_imem_btb_update_valid), // @[RocketTile.scala:147:20]
.io_cpu_btb_update_bits_prediction_cfiType (_core_io_imem_btb_update_bits_prediction_cfiType), // @[RocketTile.scala:147:20]
.io_cpu_btb_update_bits_prediction_taken (_core_io_imem_btb_update_bits_prediction_taken), // @[RocketTile.scala:147:20]
.io_cpu_btb_update_bits_prediction_mask (_core_io_imem_btb_update_bits_prediction_mask), // @[RocketTile.scala:147:20]
.io_cpu_btb_update_bits_prediction_bridx (_core_io_imem_btb_update_bits_prediction_bridx), // @[RocketTile.scala:147:20]
.io_cpu_btb_update_bits_prediction_target (_core_io_imem_btb_update_bits_prediction_target), // @[RocketTile.scala:147:20]
.io_cpu_btb_update_bits_prediction_entry (_core_io_imem_btb_update_bits_prediction_entry), // @[RocketTile.scala:147:20]
.io_cpu_btb_update_bits_prediction_bht_history (_core_io_imem_btb_update_bits_prediction_bht_history), // @[RocketTile.scala:147:20]
.io_cpu_btb_update_bits_prediction_bht_value (_core_io_imem_btb_update_bits_prediction_bht_value), // @[RocketTile.scala:147:20]
.io_cpu_btb_update_bits_pc (_core_io_imem_btb_update_bits_pc), // @[RocketTile.scala:147:20]
.io_cpu_btb_update_bits_target (_core_io_imem_btb_update_bits_target), // @[RocketTile.scala:147:20]
.io_cpu_btb_update_bits_isValid (_core_io_imem_btb_update_bits_isValid), // @[RocketTile.scala:147:20]
.io_cpu_btb_update_bits_br_pc (_core_io_imem_btb_update_bits_br_pc), // @[RocketTile.scala:147:20]
.io_cpu_btb_update_bits_cfiType (_core_io_imem_btb_update_bits_cfiType), // @[RocketTile.scala:147:20]
.io_cpu_bht_update_valid (_core_io_imem_bht_update_valid), // @[RocketTile.scala:147:20]
.io_cpu_bht_update_bits_prediction_history (_core_io_imem_bht_update_bits_prediction_history), // @[RocketTile.scala:147:20]
.io_cpu_bht_update_bits_prediction_value (_core_io_imem_bht_update_bits_prediction_value), // @[RocketTile.scala:147:20]
.io_cpu_bht_update_bits_pc (_core_io_imem_bht_update_bits_pc), // @[RocketTile.scala:147:20]
.io_cpu_bht_update_bits_branch (_core_io_imem_bht_update_bits_branch), // @[RocketTile.scala:147:20]
.io_cpu_bht_update_bits_taken (_core_io_imem_bht_update_bits_taken), // @[RocketTile.scala:147:20]
.io_cpu_bht_update_bits_mispredict (_core_io_imem_bht_update_bits_mispredict), // @[RocketTile.scala:147:20]
.io_cpu_flush_icache (_core_io_imem_flush_icache), // @[RocketTile.scala:147:20]
.io_cpu_npc (_frontend_io_cpu_npc),
.io_cpu_perf_acquire (_frontend_io_cpu_perf_acquire),
.io_cpu_progress (_core_io_imem_progress), // @[RocketTile.scala:147:20]
.io_ptw_req_ready (_ptw_io_requestor_1_req_ready), // @[PTW.scala:802:19]
.io_ptw_req_bits_valid (_frontend_io_ptw_req_bits_valid),
.io_ptw_req_bits_bits_addr (_frontend_io_ptw_req_bits_bits_addr),
.io_ptw_req_bits_bits_need_gpa (_frontend_io_ptw_req_bits_bits_need_gpa),
.io_ptw_req_bits_bits_vstage1 (_frontend_io_ptw_req_bits_bits_vstage1),
.io_ptw_req_bits_bits_stage2 (_frontend_io_ptw_req_bits_bits_stage2),
.io_ptw_resp_valid (_ptw_io_requestor_1_resp_valid), // @[PTW.scala:802:19]
.io_ptw_resp_bits_ae_ptw (_ptw_io_requestor_1_resp_bits_ae_ptw), // @[PTW.scala:802:19]
.io_ptw_resp_bits_ae_final (_ptw_io_requestor_1_resp_bits_ae_final), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pf (_ptw_io_requestor_1_resp_bits_pf), // @[PTW.scala:802:19]
.io_ptw_resp_bits_gf (_ptw_io_requestor_1_resp_bits_gf), // @[PTW.scala:802:19]
.io_ptw_resp_bits_hr (_ptw_io_requestor_1_resp_bits_hr), // @[PTW.scala:802:19]
.io_ptw_resp_bits_hw (_ptw_io_requestor_1_resp_bits_hw), // @[PTW.scala:802:19]
.io_ptw_resp_bits_hx (_ptw_io_requestor_1_resp_bits_hx), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pte_reserved_for_future (_ptw_io_requestor_1_resp_bits_pte_reserved_for_future), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pte_ppn (_ptw_io_requestor_1_resp_bits_pte_ppn), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pte_reserved_for_software (_ptw_io_requestor_1_resp_bits_pte_reserved_for_software), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pte_d (_ptw_io_requestor_1_resp_bits_pte_d), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pte_a (_ptw_io_requestor_1_resp_bits_pte_a), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pte_g (_ptw_io_requestor_1_resp_bits_pte_g), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pte_u (_ptw_io_requestor_1_resp_bits_pte_u), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pte_x (_ptw_io_requestor_1_resp_bits_pte_x), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pte_w (_ptw_io_requestor_1_resp_bits_pte_w), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pte_r (_ptw_io_requestor_1_resp_bits_pte_r), // @[PTW.scala:802:19]
.io_ptw_resp_bits_pte_v (_ptw_io_requestor_1_resp_bits_pte_v), // @[PTW.scala:802:19]
.io_ptw_resp_bits_level (_ptw_io_requestor_1_resp_bits_level), // @[PTW.scala:802:19]
.io_ptw_resp_bits_homogeneous (_ptw_io_requestor_1_resp_bits_homogeneous), // @[PTW.scala:802:19]
.io_ptw_resp_bits_gpa_valid (_ptw_io_requestor_1_resp_bits_gpa_valid), // @[PTW.scala:802:19]
.io_ptw_resp_bits_gpa_bits (_ptw_io_requestor_1_resp_bits_gpa_bits), // @[PTW.scala:802:19]
.io_ptw_resp_bits_gpa_is_pte (_ptw_io_requestor_1_resp_bits_gpa_is_pte), // @[PTW.scala:802:19]
.io_ptw_status_debug (_ptw_io_requestor_1_status_debug), // @[PTW.scala:802:19]
.io_ptw_status_cease (_ptw_io_requestor_1_status_cease), // @[PTW.scala:802:19]
.io_ptw_status_wfi (_ptw_io_requestor_1_status_wfi), // @[PTW.scala:802:19]
.io_ptw_status_isa (_ptw_io_requestor_1_status_isa), // @[PTW.scala:802:19]
.io_ptw_status_dv (_ptw_io_requestor_1_status_dv), // @[PTW.scala:802:19]
.io_ptw_status_v (_ptw_io_requestor_1_status_v), // @[PTW.scala:802:19]
.io_ptw_status_sd (_ptw_io_requestor_1_status_sd), // @[PTW.scala:802:19]
.io_ptw_status_mpv (_ptw_io_requestor_1_status_mpv), // @[PTW.scala:802:19]
.io_ptw_status_gva (_ptw_io_requestor_1_status_gva), // @[PTW.scala:802:19]
.io_ptw_status_fs (_ptw_io_requestor_1_status_fs), // @[PTW.scala:802:19]
.io_ptw_status_mpp (_ptw_io_requestor_1_status_mpp), // @[PTW.scala:802:19]
.io_ptw_status_mpie (_ptw_io_requestor_1_status_mpie), // @[PTW.scala:802:19]
.io_ptw_status_mie (_ptw_io_requestor_1_status_mie), // @[PTW.scala:802:19]
.io_ptw_gstatus_debug (_ptw_io_requestor_1_gstatus_debug), // @[PTW.scala:802:19]
.io_ptw_gstatus_cease (_ptw_io_requestor_1_gstatus_cease), // @[PTW.scala:802:19]
.io_ptw_gstatus_wfi (_ptw_io_requestor_1_gstatus_wfi), // @[PTW.scala:802:19]
.io_ptw_gstatus_isa (_ptw_io_requestor_1_gstatus_isa), // @[PTW.scala:802:19]
.io_ptw_gstatus_dprv (_ptw_io_requestor_1_gstatus_dprv), // @[PTW.scala:802:19]
.io_ptw_gstatus_dv (_ptw_io_requestor_1_gstatus_dv), // @[PTW.scala:802:19]
.io_ptw_gstatus_prv (_ptw_io_requestor_1_gstatus_prv), // @[PTW.scala:802:19]
.io_ptw_gstatus_v (_ptw_io_requestor_1_gstatus_v), // @[PTW.scala:802:19]
.io_ptw_gstatus_sd (_ptw_io_requestor_1_gstatus_sd), // @[PTW.scala:802:19]
.io_ptw_gstatus_zero2 (_ptw_io_requestor_1_gstatus_zero2), // @[PTW.scala:802:19]
.io_ptw_gstatus_mpv (_ptw_io_requestor_1_gstatus_mpv), // @[PTW.scala:802:19]
.io_ptw_gstatus_gva (_ptw_io_requestor_1_gstatus_gva), // @[PTW.scala:802:19]
.io_ptw_gstatus_mbe (_ptw_io_requestor_1_gstatus_mbe), // @[PTW.scala:802:19]
.io_ptw_gstatus_sbe (_ptw_io_requestor_1_gstatus_sbe), // @[PTW.scala:802:19]
.io_ptw_gstatus_sxl (_ptw_io_requestor_1_gstatus_sxl), // @[PTW.scala:802:19]
.io_ptw_gstatus_zero1 (_ptw_io_requestor_1_gstatus_zero1), // @[PTW.scala:802:19]
.io_ptw_gstatus_tsr (_ptw_io_requestor_1_gstatus_tsr), // @[PTW.scala:802:19]
.io_ptw_gstatus_tw (_ptw_io_requestor_1_gstatus_tw), // @[PTW.scala:802:19]
.io_ptw_gstatus_tvm (_ptw_io_requestor_1_gstatus_tvm), // @[PTW.scala:802:19]
.io_ptw_gstatus_mxr (_ptw_io_requestor_1_gstatus_mxr), // @[PTW.scala:802:19]
.io_ptw_gstatus_sum (_ptw_io_requestor_1_gstatus_sum), // @[PTW.scala:802:19]
.io_ptw_gstatus_mprv (_ptw_io_requestor_1_gstatus_mprv), // @[PTW.scala:802:19]
.io_ptw_gstatus_fs (_ptw_io_requestor_1_gstatus_fs), // @[PTW.scala:802:19]
.io_ptw_gstatus_mpp (_ptw_io_requestor_1_gstatus_mpp), // @[PTW.scala:802:19]
.io_ptw_gstatus_vs (_ptw_io_requestor_1_gstatus_vs), // @[PTW.scala:802:19]
.io_ptw_gstatus_spp (_ptw_io_requestor_1_gstatus_spp), // @[PTW.scala:802:19]
.io_ptw_gstatus_mpie (_ptw_io_requestor_1_gstatus_mpie), // @[PTW.scala:802:19]
.io_ptw_gstatus_ube (_ptw_io_requestor_1_gstatus_ube), // @[PTW.scala:802:19]
.io_ptw_gstatus_spie (_ptw_io_requestor_1_gstatus_spie), // @[PTW.scala:802:19]
.io_ptw_gstatus_upie (_ptw_io_requestor_1_gstatus_upie), // @[PTW.scala:802:19]
.io_ptw_gstatus_mie (_ptw_io_requestor_1_gstatus_mie), // @[PTW.scala:802:19]
.io_ptw_gstatus_hie (_ptw_io_requestor_1_gstatus_hie), // @[PTW.scala:802:19]
.io_ptw_gstatus_sie (_ptw_io_requestor_1_gstatus_sie), // @[PTW.scala:802:19]
.io_ptw_gstatus_uie (_ptw_io_requestor_1_gstatus_uie), // @[PTW.scala:802:19]
.io_ptw_pmp_0_cfg_l (_ptw_io_requestor_1_pmp_0_cfg_l), // @[PTW.scala:802:19]
.io_ptw_pmp_0_cfg_a (_ptw_io_requestor_1_pmp_0_cfg_a), // @[PTW.scala:802:19]
.io_ptw_pmp_0_cfg_x (_ptw_io_requestor_1_pmp_0_cfg_x), // @[PTW.scala:802:19]
.io_ptw_pmp_0_cfg_w (_ptw_io_requestor_1_pmp_0_cfg_w), // @[PTW.scala:802:19]
.io_ptw_pmp_0_cfg_r (_ptw_io_requestor_1_pmp_0_cfg_r), // @[PTW.scala:802:19]
.io_ptw_pmp_0_addr (_ptw_io_requestor_1_pmp_0_addr), // @[PTW.scala:802:19]
.io_ptw_pmp_0_mask (_ptw_io_requestor_1_pmp_0_mask), // @[PTW.scala:802:19]
.io_ptw_pmp_1_cfg_l (_ptw_io_requestor_1_pmp_1_cfg_l), // @[PTW.scala:802:19]
.io_ptw_pmp_1_cfg_a (_ptw_io_requestor_1_pmp_1_cfg_a), // @[PTW.scala:802:19]
.io_ptw_pmp_1_cfg_x (_ptw_io_requestor_1_pmp_1_cfg_x), // @[PTW.scala:802:19]
.io_ptw_pmp_1_cfg_w (_ptw_io_requestor_1_pmp_1_cfg_w), // @[PTW.scala:802:19]
.io_ptw_pmp_1_cfg_r (_ptw_io_requestor_1_pmp_1_cfg_r), // @[PTW.scala:802:19]
.io_ptw_pmp_1_addr (_ptw_io_requestor_1_pmp_1_addr), // @[PTW.scala:802:19]
.io_ptw_pmp_1_mask (_ptw_io_requestor_1_pmp_1_mask), // @[PTW.scala:802:19]
.io_ptw_pmp_2_cfg_l (_ptw_io_requestor_1_pmp_2_cfg_l), // @[PTW.scala:802:19]
.io_ptw_pmp_2_cfg_a (_ptw_io_requestor_1_pmp_2_cfg_a), // @[PTW.scala:802:19]
.io_ptw_pmp_2_cfg_x (_ptw_io_requestor_1_pmp_2_cfg_x), // @[PTW.scala:802:19]
.io_ptw_pmp_2_cfg_w (_ptw_io_requestor_1_pmp_2_cfg_w), // @[PTW.scala:802:19]
.io_ptw_pmp_2_cfg_r (_ptw_io_requestor_1_pmp_2_cfg_r), // @[PTW.scala:802:19]
.io_ptw_pmp_2_addr (_ptw_io_requestor_1_pmp_2_addr), // @[PTW.scala:802:19]
.io_ptw_pmp_2_mask (_ptw_io_requestor_1_pmp_2_mask), // @[PTW.scala:802:19]
.io_ptw_pmp_3_cfg_l (_ptw_io_requestor_1_pmp_3_cfg_l), // @[PTW.scala:802:19]
.io_ptw_pmp_3_cfg_a (_ptw_io_requestor_1_pmp_3_cfg_a), // @[PTW.scala:802:19]
.io_ptw_pmp_3_cfg_x (_ptw_io_requestor_1_pmp_3_cfg_x), // @[PTW.scala:802:19]
.io_ptw_pmp_3_cfg_w (_ptw_io_requestor_1_pmp_3_cfg_w), // @[PTW.scala:802:19]
.io_ptw_pmp_3_cfg_r (_ptw_io_requestor_1_pmp_3_cfg_r), // @[PTW.scala:802:19]
.io_ptw_pmp_3_addr (_ptw_io_requestor_1_pmp_3_addr), // @[PTW.scala:802:19]
.io_ptw_pmp_3_mask (_ptw_io_requestor_1_pmp_3_mask), // @[PTW.scala:802:19]
.io_ptw_pmp_4_cfg_l (_ptw_io_requestor_1_pmp_4_cfg_l), // @[PTW.scala:802:19]
.io_ptw_pmp_4_cfg_a (_ptw_io_requestor_1_pmp_4_cfg_a), // @[PTW.scala:802:19]
.io_ptw_pmp_4_cfg_x (_ptw_io_requestor_1_pmp_4_cfg_x), // @[PTW.scala:802:19]
.io_ptw_pmp_4_cfg_w (_ptw_io_requestor_1_pmp_4_cfg_w), // @[PTW.scala:802:19]
.io_ptw_pmp_4_cfg_r (_ptw_io_requestor_1_pmp_4_cfg_r), // @[PTW.scala:802:19]
.io_ptw_pmp_4_addr (_ptw_io_requestor_1_pmp_4_addr), // @[PTW.scala:802:19]
.io_ptw_pmp_4_mask (_ptw_io_requestor_1_pmp_4_mask), // @[PTW.scala:802:19]
.io_ptw_pmp_5_cfg_l (_ptw_io_requestor_1_pmp_5_cfg_l), // @[PTW.scala:802:19]
.io_ptw_pmp_5_cfg_a (_ptw_io_requestor_1_pmp_5_cfg_a), // @[PTW.scala:802:19]
.io_ptw_pmp_5_cfg_x (_ptw_io_requestor_1_pmp_5_cfg_x), // @[PTW.scala:802:19]
.io_ptw_pmp_5_cfg_w (_ptw_io_requestor_1_pmp_5_cfg_w), // @[PTW.scala:802:19]
.io_ptw_pmp_5_cfg_r (_ptw_io_requestor_1_pmp_5_cfg_r), // @[PTW.scala:802:19]
.io_ptw_pmp_5_addr (_ptw_io_requestor_1_pmp_5_addr), // @[PTW.scala:802:19]
.io_ptw_pmp_5_mask (_ptw_io_requestor_1_pmp_5_mask), // @[PTW.scala:802:19]
.io_ptw_pmp_6_cfg_l (_ptw_io_requestor_1_pmp_6_cfg_l), // @[PTW.scala:802:19]
.io_ptw_pmp_6_cfg_a (_ptw_io_requestor_1_pmp_6_cfg_a), // @[PTW.scala:802:19]
.io_ptw_pmp_6_cfg_x (_ptw_io_requestor_1_pmp_6_cfg_x), // @[PTW.scala:802:19]
.io_ptw_pmp_6_cfg_w (_ptw_io_requestor_1_pmp_6_cfg_w), // @[PTW.scala:802:19]
.io_ptw_pmp_6_cfg_r (_ptw_io_requestor_1_pmp_6_cfg_r), // @[PTW.scala:802:19]
.io_ptw_pmp_6_addr (_ptw_io_requestor_1_pmp_6_addr), // @[PTW.scala:802:19]
.io_ptw_pmp_6_mask (_ptw_io_requestor_1_pmp_6_mask), // @[PTW.scala:802:19]
.io_ptw_pmp_7_cfg_l (_ptw_io_requestor_1_pmp_7_cfg_l), // @[PTW.scala:802:19]
.io_ptw_pmp_7_cfg_a (_ptw_io_requestor_1_pmp_7_cfg_a), // @[PTW.scala:802:19]
.io_ptw_pmp_7_cfg_x (_ptw_io_requestor_1_pmp_7_cfg_x), // @[PTW.scala:802:19]
.io_ptw_pmp_7_cfg_w (_ptw_io_requestor_1_pmp_7_cfg_w), // @[PTW.scala:802:19]
.io_ptw_pmp_7_cfg_r (_ptw_io_requestor_1_pmp_7_cfg_r), // @[PTW.scala:802:19]
.io_ptw_pmp_7_addr (_ptw_io_requestor_1_pmp_7_addr), // @[PTW.scala:802:19]
.io_ptw_pmp_7_mask (_ptw_io_requestor_1_pmp_7_mask), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_0_ren (_ptw_io_requestor_1_customCSRs_csrs_0_ren), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_0_wen (_ptw_io_requestor_1_customCSRs_csrs_0_wen), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_0_wdata (_ptw_io_requestor_1_customCSRs_csrs_0_wdata), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_0_value (_ptw_io_requestor_1_customCSRs_csrs_0_value), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_1_ren (_ptw_io_requestor_1_customCSRs_csrs_1_ren), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_1_wen (_ptw_io_requestor_1_customCSRs_csrs_1_wen), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_1_wdata (_ptw_io_requestor_1_customCSRs_csrs_1_wdata), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_1_value (_ptw_io_requestor_1_customCSRs_csrs_1_value), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_2_ren (_ptw_io_requestor_1_customCSRs_csrs_2_ren), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_2_wen (_ptw_io_requestor_1_customCSRs_csrs_2_wen), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_2_wdata (_ptw_io_requestor_1_customCSRs_csrs_2_wdata), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_2_value (_ptw_io_requestor_1_customCSRs_csrs_2_value), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_3_ren (_ptw_io_requestor_1_customCSRs_csrs_3_ren), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_3_wen (_ptw_io_requestor_1_customCSRs_csrs_3_wen), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_3_wdata (_ptw_io_requestor_1_customCSRs_csrs_3_wdata), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_3_value (_ptw_io_requestor_1_customCSRs_csrs_3_value) // @[PTW.scala:802:19]
); // @[Frontend.scala:393:28]
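  // Descriptive note on the instance above: the Frontend is driven by the core's
  // io_imem interface (fetch requests, sfence, BTB/BHT updates, icache flush) and
  // returns fetch responses and the next PC; it fetches through the second width
  // widget (widget_1_auto_anon_*) and performs instruction-side translation as
  // PTW requestor 1.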
TLFragmenter fragmenter ( // @[Fragmenter.scala:345:34]
.clock (clock),
.reset (reset)
); // @[Fragmenter.scala:345:34]
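  // Only clock and reset are wired to this TLFragmenter instance; its TileLink
  // payload ports are presumably unused or optimized away in this configuration.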
ScratchpadSlavePort dtim_adapter ( // @[RocketTile.scala:75:15]
.clock (clock),
.reset (reset),
.auto_in_a_ready (_dtim_adapter_auto_in_a_ready),
.auto_in_a_valid (_fragmenter_1_auto_anon_out_a_valid), // @[Fragmenter.scala:345:34]
.auto_in_a_bits_opcode (_fragmenter_1_auto_anon_out_a_bits_opcode), // @[Fragmenter.scala:345:34]
.auto_in_a_bits_param (_fragmenter_1_auto_anon_out_a_bits_param), // @[Fragmenter.scala:345:34]
.auto_in_a_bits_size (_fragmenter_1_auto_anon_out_a_bits_size), // @[Fragmenter.scala:345:34]
.auto_in_a_bits_source (_fragmenter_1_auto_anon_out_a_bits_source), // @[Fragmenter.scala:345:34]
.auto_in_a_bits_address (_fragmenter_1_auto_anon_out_a_bits_address), // @[Fragmenter.scala:345:34]
.auto_in_a_bits_mask (_fragmenter_1_auto_anon_out_a_bits_mask), // @[Fragmenter.scala:345:34]
.auto_in_a_bits_data (_fragmenter_1_auto_anon_out_a_bits_data), // @[Fragmenter.scala:345:34]
.auto_in_a_bits_corrupt (_fragmenter_1_auto_anon_out_a_bits_corrupt), // @[Fragmenter.scala:345:34]
.auto_in_d_ready (_fragmenter_1_auto_anon_out_d_ready), // @[Fragmenter.scala:345:34]
.auto_in_d_valid (_dtim_adapter_auto_in_d_valid),
.auto_in_d_bits_opcode (_dtim_adapter_auto_in_d_bits_opcode),
.auto_in_d_bits_size (_dtim_adapter_auto_in_d_bits_size),
.auto_in_d_bits_source (_dtim_adapter_auto_in_d_bits_source),
.auto_in_d_bits_data (_dtim_adapter_auto_in_d_bits_data),
.io_dmem_req_ready (_dcacheArb_io_requestor_1_req_ready), // @[HellaCache.scala:292:25]
.io_dmem_req_valid (_dtim_adapter_io_dmem_req_valid),
.io_dmem_req_bits_addr (_dtim_adapter_io_dmem_req_bits_addr),
.io_dmem_req_bits_cmd (_dtim_adapter_io_dmem_req_bits_cmd),
.io_dmem_req_bits_size (_dtim_adapter_io_dmem_req_bits_size),
.io_dmem_s1_kill (_dtim_adapter_io_dmem_s1_kill),
.io_dmem_s1_data_data (_dtim_adapter_io_dmem_s1_data_data),
.io_dmem_s1_data_mask (_dtim_adapter_io_dmem_s1_data_mask),
.io_dmem_s2_nack (_dcacheArb_io_requestor_1_s2_nack), // @[HellaCache.scala:292:25]
.io_dmem_s2_nack_cause_raw (_dcacheArb_io_requestor_1_s2_nack_cause_raw), // @[HellaCache.scala:292:25]
.io_dmem_s2_uncached (_dcacheArb_io_requestor_1_s2_uncached), // @[HellaCache.scala:292:25]
.io_dmem_s2_paddr (_dcacheArb_io_requestor_1_s2_paddr), // @[HellaCache.scala:292:25]
.io_dmem_resp_valid (_dcacheArb_io_requestor_1_resp_valid), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_addr (_dcacheArb_io_requestor_1_resp_bits_addr), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_tag (_dcacheArb_io_requestor_1_resp_bits_tag), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_cmd (_dcacheArb_io_requestor_1_resp_bits_cmd), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_size (_dcacheArb_io_requestor_1_resp_bits_size), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_signed (_dcacheArb_io_requestor_1_resp_bits_signed), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_dprv (_dcacheArb_io_requestor_1_resp_bits_dprv), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_dv (_dcacheArb_io_requestor_1_resp_bits_dv), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_data (_dcacheArb_io_requestor_1_resp_bits_data), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_mask (_dcacheArb_io_requestor_1_resp_bits_mask), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_replay (_dcacheArb_io_requestor_1_resp_bits_replay), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_has_data (_dcacheArb_io_requestor_1_resp_bits_has_data), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_data_word_bypass (_dcacheArb_io_requestor_1_resp_bits_data_word_bypass), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_data_raw (_dcacheArb_io_requestor_1_resp_bits_data_raw), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_store_data (_dcacheArb_io_requestor_1_resp_bits_store_data), // @[HellaCache.scala:292:25]
.io_dmem_replay_next (_dcacheArb_io_requestor_1_replay_next), // @[HellaCache.scala:292:25]
.io_dmem_s2_xcpt_ma_ld (_dcacheArb_io_requestor_1_s2_xcpt_ma_ld), // @[HellaCache.scala:292:25]
.io_dmem_s2_xcpt_ma_st (_dcacheArb_io_requestor_1_s2_xcpt_ma_st), // @[HellaCache.scala:292:25]
.io_dmem_s2_xcpt_pf_ld (_dcacheArb_io_requestor_1_s2_xcpt_pf_ld), // @[HellaCache.scala:292:25]
.io_dmem_s2_xcpt_pf_st (_dcacheArb_io_requestor_1_s2_xcpt_pf_st), // @[HellaCache.scala:292:25]
.io_dmem_s2_xcpt_ae_ld (_dcacheArb_io_requestor_1_s2_xcpt_ae_ld), // @[HellaCache.scala:292:25]
.io_dmem_s2_xcpt_ae_st (_dcacheArb_io_requestor_1_s2_xcpt_ae_st), // @[HellaCache.scala:292:25]
.io_dmem_s2_gpa (_dcacheArb_io_requestor_1_s2_gpa), // @[HellaCache.scala:292:25]
.io_dmem_ordered (_dcacheArb_io_requestor_1_ordered), // @[HellaCache.scala:292:25]
.io_dmem_store_pending (_dcacheArb_io_requestor_1_store_pending), // @[HellaCache.scala:292:25]
.io_dmem_perf_acquire (_dcacheArb_io_requestor_1_perf_acquire), // @[HellaCache.scala:292:25]
.io_dmem_perf_grant (_dcacheArb_io_requestor_1_perf_grant), // @[HellaCache.scala:292:25]
.io_dmem_perf_blocked (_dcacheArb_io_requestor_1_perf_blocked), // @[HellaCache.scala:292:25]
.io_dmem_perf_canAcceptStoreThenLoad (_dcacheArb_io_requestor_1_perf_canAcceptStoreThenLoad), // @[HellaCache.scala:292:25]
.io_dmem_perf_canAcceptStoreThenRMW (_dcacheArb_io_requestor_1_perf_canAcceptStoreThenRMW), // @[HellaCache.scala:292:25]
.io_dmem_perf_canAcceptLoadThenLoad (_dcacheArb_io_requestor_1_perf_canAcceptLoadThenLoad), // @[HellaCache.scala:292:25]
.io_dmem_perf_storeBufferEmptyAfterLoad (_dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterLoad), // @[HellaCache.scala:292:25]
.io_dmem_perf_storeBufferEmptyAfterStore (_dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterStore) // @[HellaCache.scala:292:25]
); // @[RocketTile.scala:75:15]
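  // Descriptive note on the instance above: the ScratchpadSlavePort (dtim_adapter)
  // converts TileLink accesses arriving from fragmenter_1 into HellaCache-style
  // requests, which it presents to the data-cache arbiter as requestor 1.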
TLFragmenter_1 fragmenter_1 ( // @[Fragmenter.scala:345:34]
.clock (clock),
.reset (reset),
.auto_anon_in_a_ready (tlSlaveXbar_auto_anon_out_a_ready),
.auto_anon_in_a_valid (tlSlaveXbar_auto_anon_out_a_valid), // @[Xbar.scala:74:9]
.auto_anon_in_a_bits_opcode (tlSlaveXbar_auto_anon_out_a_bits_opcode), // @[Xbar.scala:74:9]
.auto_anon_in_a_bits_param (tlSlaveXbar_auto_anon_out_a_bits_param), // @[Xbar.scala:74:9]
.auto_anon_in_a_bits_size (tlSlaveXbar_auto_anon_out_a_bits_size), // @[Xbar.scala:74:9]
.auto_anon_in_a_bits_source (tlSlaveXbar_auto_anon_out_a_bits_source), // @[Xbar.scala:74:9]
.auto_anon_in_a_bits_address (tlSlaveXbar_auto_anon_out_a_bits_address), // @[Xbar.scala:74:9]
.auto_anon_in_a_bits_mask (tlSlaveXbar_auto_anon_out_a_bits_mask), // @[Xbar.scala:74:9]
.auto_anon_in_a_bits_data (tlSlaveXbar_auto_anon_out_a_bits_data), // @[Xbar.scala:74:9]
.auto_anon_in_a_bits_corrupt (tlSlaveXbar_auto_anon_out_a_bits_corrupt), // @[Xbar.scala:74:9]
.auto_anon_in_d_ready (tlSlaveXbar_auto_anon_out_d_ready), // @[Xbar.scala:74:9]
.auto_anon_in_d_valid (tlSlaveXbar_auto_anon_out_d_valid),
.auto_anon_in_d_bits_opcode (tlSlaveXbar_auto_anon_out_d_bits_opcode),
.auto_anon_in_d_bits_size (tlSlaveXbar_auto_anon_out_d_bits_size),
.auto_anon_in_d_bits_source (tlSlaveXbar_auto_anon_out_d_bits_source),
.auto_anon_in_d_bits_data (tlSlaveXbar_auto_anon_out_d_bits_data),
.auto_anon_out_a_ready (_dtim_adapter_auto_in_a_ready), // @[RocketTile.scala:75:15]
.auto_anon_out_a_valid (_fragmenter_1_auto_anon_out_a_valid),
.auto_anon_out_a_bits_opcode (_fragmenter_1_auto_anon_out_a_bits_opcode),
.auto_anon_out_a_bits_param (_fragmenter_1_auto_anon_out_a_bits_param),
.auto_anon_out_a_bits_size (_fragmenter_1_auto_anon_out_a_bits_size),
.auto_anon_out_a_bits_source (_fragmenter_1_auto_anon_out_a_bits_source),
.auto_anon_out_a_bits_address (_fragmenter_1_auto_anon_out_a_bits_address),
.auto_anon_out_a_bits_mask (_fragmenter_1_auto_anon_out_a_bits_mask),
.auto_anon_out_a_bits_data (_fragmenter_1_auto_anon_out_a_bits_data),
.auto_anon_out_a_bits_corrupt (_fragmenter_1_auto_anon_out_a_bits_corrupt),
.auto_anon_out_d_ready (_fragmenter_1_auto_anon_out_d_ready),
.auto_anon_out_d_valid (_dtim_adapter_auto_in_d_valid), // @[RocketTile.scala:75:15]
.auto_anon_out_d_bits_opcode (_dtim_adapter_auto_in_d_bits_opcode), // @[RocketTile.scala:75:15]
.auto_anon_out_d_bits_size (_dtim_adapter_auto_in_d_bits_size), // @[RocketTile.scala:75:15]
.auto_anon_out_d_bits_source (_dtim_adapter_auto_in_d_bits_source), // @[RocketTile.scala:75:15]
.auto_anon_out_d_bits_data (_dtim_adapter_auto_in_d_bits_data) // @[RocketTile.scala:75:15]
); // @[Fragmenter.scala:345:34]
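  // Descriptive note on the instance above: fragmenter_1 sits between the tile's
  // slave crossbar (tlSlaveXbar_auto_anon_out_*) and the scratchpad adapter,
  // breaking slave-side accesses into sizes the dtim_adapter can accept.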
FPU fpuOpt ( // @[RocketTile.scala:242:62]
.clock (clock),
.reset (reset),
.io_hartid (_core_io_fpu_hartid), // @[RocketTile.scala:147:20]
.io_time (_core_io_fpu_time), // @[RocketTile.scala:147:20]
.io_inst (_core_io_fpu_inst), // @[RocketTile.scala:147:20]
.io_fromint_data (_core_io_fpu_fromint_data), // @[RocketTile.scala:147:20]
.io_fcsr_rm (_core_io_fpu_fcsr_rm), // @[RocketTile.scala:147:20]
.io_fcsr_flags_valid (_fpuOpt_io_fcsr_flags_valid),
.io_fcsr_flags_bits (_fpuOpt_io_fcsr_flags_bits),
.io_store_data (_fpuOpt_io_store_data),
.io_toint_data (_fpuOpt_io_toint_data),
.io_ll_resp_val (_core_io_fpu_ll_resp_val), // @[RocketTile.scala:147:20]
.io_ll_resp_type (_core_io_fpu_ll_resp_type), // @[RocketTile.scala:147:20]
.io_ll_resp_tag (_core_io_fpu_ll_resp_tag), // @[RocketTile.scala:147:20]
.io_ll_resp_data (_core_io_fpu_ll_resp_data), // @[RocketTile.scala:147:20]
.io_valid (_core_io_fpu_valid), // @[RocketTile.scala:147:20]
.io_fcsr_rdy (_fpuOpt_io_fcsr_rdy),
.io_nack_mem (_fpuOpt_io_nack_mem),
.io_illegal_rm (_fpuOpt_io_illegal_rm),
.io_killx (_core_io_fpu_killx), // @[RocketTile.scala:147:20]
.io_killm (_core_io_fpu_killm), // @[RocketTile.scala:147:20]
.io_dec_ldst (_fpuOpt_io_dec_ldst),
.io_dec_wen (_fpuOpt_io_dec_wen),
.io_dec_ren1 (_fpuOpt_io_dec_ren1),
.io_dec_ren2 (_fpuOpt_io_dec_ren2),
.io_dec_ren3 (_fpuOpt_io_dec_ren3),
.io_dec_swap12 (_fpuOpt_io_dec_swap12),
.io_dec_swap23 (_fpuOpt_io_dec_swap23),
.io_dec_typeTagIn (_fpuOpt_io_dec_typeTagIn),
.io_dec_typeTagOut (_fpuOpt_io_dec_typeTagOut),
.io_dec_fromint (_fpuOpt_io_dec_fromint),
.io_dec_toint (_fpuOpt_io_dec_toint),
.io_dec_fastpipe (_fpuOpt_io_dec_fastpipe),
.io_dec_fma (_fpuOpt_io_dec_fma),
.io_dec_div (_fpuOpt_io_dec_div),
.io_dec_sqrt (_fpuOpt_io_dec_sqrt),
.io_dec_wflags (_fpuOpt_io_dec_wflags),
.io_dec_vec (_fpuOpt_io_dec_vec),
.io_sboard_set (_fpuOpt_io_sboard_set),
.io_sboard_clr (_fpuOpt_io_sboard_clr),
.io_sboard_clra (_fpuOpt_io_sboard_clra),
.io_keep_clock_enabled (_core_io_fpu_keep_clock_enabled) // @[RocketTile.scala:147:20]
); // @[RocketTile.scala:242:62]
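  // Descriptive note on the instance above: the FPU is driven entirely by the
  // core's io_fpu interface and returns decode information, scoreboard set/clear,
  // to-int/store data, and FCSR flag updates back to the core.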
HellaCacheArbiter dcacheArb ( // @[HellaCache.scala:292:25]
.clock (clock),
.reset (reset),
.io_requestor_0_req_ready (_dcacheArb_io_requestor_0_req_ready),
.io_requestor_0_req_valid (_core_io_dmem_req_valid), // @[RocketTile.scala:147:20]
.io_requestor_0_req_bits_addr (_core_io_dmem_req_bits_addr), // @[RocketTile.scala:147:20]
.io_requestor_0_req_bits_tag (_core_io_dmem_req_bits_tag), // @[RocketTile.scala:147:20]
.io_requestor_0_req_bits_cmd (_core_io_dmem_req_bits_cmd), // @[RocketTile.scala:147:20]
.io_requestor_0_req_bits_size (_core_io_dmem_req_bits_size), // @[RocketTile.scala:147:20]
.io_requestor_0_req_bits_signed (_core_io_dmem_req_bits_signed), // @[RocketTile.scala:147:20]
.io_requestor_0_req_bits_dv (_core_io_dmem_req_bits_dv), // @[RocketTile.scala:147:20]
.io_requestor_0_req_bits_no_resp (_core_io_dmem_req_bits_no_resp), // @[RocketTile.scala:147:20]
.io_requestor_0_s1_kill (_core_io_dmem_s1_kill), // @[RocketTile.scala:147:20]
.io_requestor_0_s1_data_data (_core_io_dmem_s1_data_data), // @[RocketTile.scala:147:20]
.io_requestor_0_s2_nack (_dcacheArb_io_requestor_0_s2_nack),
.io_requestor_0_s2_nack_cause_raw (_dcacheArb_io_requestor_0_s2_nack_cause_raw),
.io_requestor_0_s2_uncached (_dcacheArb_io_requestor_0_s2_uncached),
.io_requestor_0_s2_paddr (_dcacheArb_io_requestor_0_s2_paddr),
.io_requestor_0_resp_valid (_dcacheArb_io_requestor_0_resp_valid),
.io_requestor_0_resp_bits_addr (_dcacheArb_io_requestor_0_resp_bits_addr),
.io_requestor_0_resp_bits_tag (_dcacheArb_io_requestor_0_resp_bits_tag),
.io_requestor_0_resp_bits_cmd (_dcacheArb_io_requestor_0_resp_bits_cmd),
.io_requestor_0_resp_bits_size (_dcacheArb_io_requestor_0_resp_bits_size),
.io_requestor_0_resp_bits_signed (_dcacheArb_io_requestor_0_resp_bits_signed),
.io_requestor_0_resp_bits_dprv (_dcacheArb_io_requestor_0_resp_bits_dprv),
.io_requestor_0_resp_bits_dv (_dcacheArb_io_requestor_0_resp_bits_dv),
.io_requestor_0_resp_bits_data (_dcacheArb_io_requestor_0_resp_bits_data),
.io_requestor_0_resp_bits_mask (_dcacheArb_io_requestor_0_resp_bits_mask),
.io_requestor_0_resp_bits_replay (_dcacheArb_io_requestor_0_resp_bits_replay),
.io_requestor_0_resp_bits_has_data (_dcacheArb_io_requestor_0_resp_bits_has_data),
.io_requestor_0_resp_bits_data_word_bypass (_dcacheArb_io_requestor_0_resp_bits_data_word_bypass),
.io_requestor_0_resp_bits_data_raw (_dcacheArb_io_requestor_0_resp_bits_data_raw),
.io_requestor_0_resp_bits_store_data (_dcacheArb_io_requestor_0_resp_bits_store_data),
.io_requestor_0_replay_next (_dcacheArb_io_requestor_0_replay_next),
.io_requestor_0_s2_xcpt_ma_ld (_dcacheArb_io_requestor_0_s2_xcpt_ma_ld),
.io_requestor_0_s2_xcpt_ma_st (_dcacheArb_io_requestor_0_s2_xcpt_ma_st),
.io_requestor_0_s2_xcpt_pf_ld (_dcacheArb_io_requestor_0_s2_xcpt_pf_ld),
.io_requestor_0_s2_xcpt_pf_st (_dcacheArb_io_requestor_0_s2_xcpt_pf_st),
.io_requestor_0_s2_xcpt_ae_ld (_dcacheArb_io_requestor_0_s2_xcpt_ae_ld),
.io_requestor_0_s2_xcpt_ae_st (_dcacheArb_io_requestor_0_s2_xcpt_ae_st),
.io_requestor_0_s2_gpa (_dcacheArb_io_requestor_0_s2_gpa),
.io_requestor_0_ordered (_dcacheArb_io_requestor_0_ordered),
.io_requestor_0_store_pending (_dcacheArb_io_requestor_0_store_pending),
.io_requestor_0_perf_acquire (_dcacheArb_io_requestor_0_perf_acquire),
.io_requestor_0_perf_grant (_dcacheArb_io_requestor_0_perf_grant),
.io_requestor_0_perf_blocked (_dcacheArb_io_requestor_0_perf_blocked),
.io_requestor_0_perf_canAcceptStoreThenLoad (_dcacheArb_io_requestor_0_perf_canAcceptStoreThenLoad),
.io_requestor_0_perf_canAcceptStoreThenRMW (_dcacheArb_io_requestor_0_perf_canAcceptStoreThenRMW),
.io_requestor_0_perf_canAcceptLoadThenLoad (_dcacheArb_io_requestor_0_perf_canAcceptLoadThenLoad),
.io_requestor_0_perf_storeBufferEmptyAfterLoad (_dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterLoad),
.io_requestor_0_perf_storeBufferEmptyAfterStore (_dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterStore),
.io_requestor_0_keep_clock_enabled (_core_io_dmem_keep_clock_enabled), // @[RocketTile.scala:147:20]
.io_requestor_1_req_ready (_dcacheArb_io_requestor_1_req_ready),
.io_requestor_1_req_valid (_dtim_adapter_io_dmem_req_valid), // @[RocketTile.scala:75:15]
.io_requestor_1_req_bits_addr (_dtim_adapter_io_dmem_req_bits_addr), // @[RocketTile.scala:75:15]
.io_requestor_1_req_bits_cmd (_dtim_adapter_io_dmem_req_bits_cmd), // @[RocketTile.scala:75:15]
.io_requestor_1_req_bits_size (_dtim_adapter_io_dmem_req_bits_size), // @[RocketTile.scala:75:15]
.io_requestor_1_s1_kill (_dtim_adapter_io_dmem_s1_kill), // @[RocketTile.scala:75:15]
.io_requestor_1_s1_data_data (_dtim_adapter_io_dmem_s1_data_data), // @[RocketTile.scala:75:15]
.io_requestor_1_s1_data_mask (_dtim_adapter_io_dmem_s1_data_mask), // @[RocketTile.scala:75:15]
.io_requestor_1_s2_nack (_dcacheArb_io_requestor_1_s2_nack),
.io_requestor_1_s2_nack_cause_raw (_dcacheArb_io_requestor_1_s2_nack_cause_raw),
.io_requestor_1_s2_uncached (_dcacheArb_io_requestor_1_s2_uncached),
.io_requestor_1_s2_paddr (_dcacheArb_io_requestor_1_s2_paddr),
.io_requestor_1_resp_valid (_dcacheArb_io_requestor_1_resp_valid),
.io_requestor_1_resp_bits_addr (_dcacheArb_io_requestor_1_resp_bits_addr),
.io_requestor_1_resp_bits_tag (_dcacheArb_io_requestor_1_resp_bits_tag),
.io_requestor_1_resp_bits_cmd (_dcacheArb_io_requestor_1_resp_bits_cmd),
.io_requestor_1_resp_bits_size (_dcacheArb_io_requestor_1_resp_bits_size),
.io_requestor_1_resp_bits_signed (_dcacheArb_io_requestor_1_resp_bits_signed),
.io_requestor_1_resp_bits_dprv (_dcacheArb_io_requestor_1_resp_bits_dprv),
.io_requestor_1_resp_bits_dv (_dcacheArb_io_requestor_1_resp_bits_dv),
.io_requestor_1_resp_bits_data (_dcacheArb_io_requestor_1_resp_bits_data),
.io_requestor_1_resp_bits_mask (_dcacheArb_io_requestor_1_resp_bits_mask),
.io_requestor_1_resp_bits_replay (_dcacheArb_io_requestor_1_resp_bits_replay),
.io_requestor_1_resp_bits_has_data (_dcacheArb_io_requestor_1_resp_bits_has_data),
.io_requestor_1_resp_bits_data_word_bypass (_dcacheArb_io_requestor_1_resp_bits_data_word_bypass),
.io_requestor_1_resp_bits_data_raw (_dcacheArb_io_requestor_1_resp_bits_data_raw),
.io_requestor_1_resp_bits_store_data (_dcacheArb_io_requestor_1_resp_bits_store_data),
.io_requestor_1_replay_next (_dcacheArb_io_requestor_1_replay_next),
.io_requestor_1_s2_xcpt_ma_ld (_dcacheArb_io_requestor_1_s2_xcpt_ma_ld),
.io_requestor_1_s2_xcpt_ma_st (_dcacheArb_io_requestor_1_s2_xcpt_ma_st),
.io_requestor_1_s2_xcpt_pf_ld (_dcacheArb_io_requestor_1_s2_xcpt_pf_ld),
.io_requestor_1_s2_xcpt_pf_st (_dcacheArb_io_requestor_1_s2_xcpt_pf_st),
.io_requestor_1_s2_xcpt_ae_ld (_dcacheArb_io_requestor_1_s2_xcpt_ae_ld),
.io_requestor_1_s2_xcpt_ae_st (_dcacheArb_io_requestor_1_s2_xcpt_ae_st),
.io_requestor_1_s2_gpa (_dcacheArb_io_requestor_1_s2_gpa),
.io_requestor_1_ordered (_dcacheArb_io_requestor_1_ordered),
.io_requestor_1_store_pending (_dcacheArb_io_requestor_1_store_pending),
.io_requestor_1_perf_acquire (_dcacheArb_io_requestor_1_perf_acquire),
.io_requestor_1_perf_grant (_dcacheArb_io_requestor_1_perf_grant),
.io_requestor_1_perf_blocked (_dcacheArb_io_requestor_1_perf_blocked),
.io_requestor_1_perf_canAcceptStoreThenLoad (_dcacheArb_io_requestor_1_perf_canAcceptStoreThenLoad),
.io_requestor_1_perf_canAcceptStoreThenRMW (_dcacheArb_io_requestor_1_perf_canAcceptStoreThenRMW),
.io_requestor_1_perf_canAcceptLoadThenLoad (_dcacheArb_io_requestor_1_perf_canAcceptLoadThenLoad),
.io_requestor_1_perf_storeBufferEmptyAfterLoad (_dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterLoad),
.io_requestor_1_perf_storeBufferEmptyAfterStore (_dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterStore),
.io_mem_req_ready (_dcache_io_cpu_req_ready), // @[HellaCache.scala:278:43]
.io_mem_req_valid (_dcacheArb_io_mem_req_valid),
.io_mem_req_bits_addr (_dcacheArb_io_mem_req_bits_addr),
.io_mem_req_bits_tag (_dcacheArb_io_mem_req_bits_tag),
.io_mem_req_bits_cmd (_dcacheArb_io_mem_req_bits_cmd),
.io_mem_req_bits_size (_dcacheArb_io_mem_req_bits_size),
.io_mem_req_bits_signed (_dcacheArb_io_mem_req_bits_signed),
.io_mem_req_bits_dprv (_dcacheArb_io_mem_req_bits_dprv),
.io_mem_req_bits_dv (_dcacheArb_io_mem_req_bits_dv),
.io_mem_req_bits_phys (_dcacheArb_io_mem_req_bits_phys),
.io_mem_req_bits_no_resp (_dcacheArb_io_mem_req_bits_no_resp),
.io_mem_req_bits_no_xcpt (_dcacheArb_io_mem_req_bits_no_xcpt),
.io_mem_s1_kill (_dcacheArb_io_mem_s1_kill),
.io_mem_s1_data_data (_dcacheArb_io_mem_s1_data_data),
.io_mem_s1_data_mask (_dcacheArb_io_mem_s1_data_mask),
.io_mem_s2_nack (_dcache_io_cpu_s2_nack), // @[HellaCache.scala:278:43]
.io_mem_s2_nack_cause_raw (_dcache_io_cpu_s2_nack_cause_raw), // @[HellaCache.scala:278:43]
.io_mem_s2_uncached (_dcache_io_cpu_s2_uncached), // @[HellaCache.scala:278:43]
.io_mem_s2_paddr (_dcache_io_cpu_s2_paddr), // @[HellaCache.scala:278:43]
.io_mem_resp_valid (_dcache_io_cpu_resp_valid), // @[HellaCache.scala:278:43]
.io_mem_resp_bits_addr (_dcache_io_cpu_resp_bits_addr), // @[HellaCache.scala:278:43]
.io_mem_resp_bits_tag (_dcache_io_cpu_resp_bits_tag), // @[HellaCache.scala:278:43]
.io_mem_resp_bits_cmd (_dcache_io_cpu_resp_bits_cmd), // @[HellaCache.scala:278:43]
.io_mem_resp_bits_size (_dcache_io_cpu_resp_bits_size), // @[HellaCache.scala:278:43]
.io_mem_resp_bits_signed (_dcache_io_cpu_resp_bits_signed), // @[HellaCache.scala:278:43]
.io_mem_resp_bits_dprv (_dcache_io_cpu_resp_bits_dprv), // @[HellaCache.scala:278:43]
.io_mem_resp_bits_dv (_dcache_io_cpu_resp_bits_dv), // @[HellaCache.scala:278:43]
.io_mem_resp_bits_data (_dcache_io_cpu_resp_bits_data), // @[HellaCache.scala:278:43]
.io_mem_resp_bits_mask (_dcache_io_cpu_resp_bits_mask), // @[HellaCache.scala:278:43]
.io_mem_resp_bits_replay (_dcache_io_cpu_resp_bits_replay), // @[HellaCache.scala:278:43]
.io_mem_resp_bits_has_data (_dcache_io_cpu_resp_bits_has_data), // @[HellaCache.scala:278:43]
.io_mem_resp_bits_data_word_bypass (_dcache_io_cpu_resp_bits_data_word_bypass), // @[HellaCache.scala:278:43]
.io_mem_resp_bits_data_raw (_dcache_io_cpu_resp_bits_data_raw), // @[HellaCache.scala:278:43]
.io_mem_resp_bits_store_data (_dcache_io_cpu_resp_bits_store_data), // @[HellaCache.scala:278:43]
.io_mem_replay_next (_dcache_io_cpu_replay_next), // @[HellaCache.scala:278:43]
.io_mem_s2_xcpt_ma_ld (_dcache_io_cpu_s2_xcpt_ma_ld), // @[HellaCache.scala:278:43]
.io_mem_s2_xcpt_ma_st (_dcache_io_cpu_s2_xcpt_ma_st), // @[HellaCache.scala:278:43]
.io_mem_s2_xcpt_pf_ld (_dcache_io_cpu_s2_xcpt_pf_ld), // @[HellaCache.scala:278:43]
.io_mem_s2_xcpt_pf_st (_dcache_io_cpu_s2_xcpt_pf_st), // @[HellaCache.scala:278:43]
.io_mem_s2_xcpt_ae_ld (_dcache_io_cpu_s2_xcpt_ae_ld), // @[HellaCache.scala:278:43]
.io_mem_s2_xcpt_ae_st (_dcache_io_cpu_s2_xcpt_ae_st), // @[HellaCache.scala:278:43]
.io_mem_s2_gpa (_dcache_io_cpu_s2_gpa), // @[HellaCache.scala:278:43]
.io_mem_ordered (_dcache_io_cpu_ordered), // @[HellaCache.scala:278:43]
.io_mem_store_pending (_dcache_io_cpu_store_pending), // @[HellaCache.scala:278:43]
.io_mem_perf_acquire (_dcache_io_cpu_perf_acquire), // @[HellaCache.scala:278:43]
.io_mem_perf_grant (_dcache_io_cpu_perf_grant), // @[HellaCache.scala:278:43]
.io_mem_perf_blocked (_dcache_io_cpu_perf_blocked), // @[HellaCache.scala:278:43]
.io_mem_perf_canAcceptStoreThenLoad (_dcache_io_cpu_perf_canAcceptStoreThenLoad), // @[HellaCache.scala:278:43]
.io_mem_perf_canAcceptStoreThenRMW (_dcache_io_cpu_perf_canAcceptStoreThenRMW), // @[HellaCache.scala:278:43]
.io_mem_perf_canAcceptLoadThenLoad (_dcache_io_cpu_perf_canAcceptLoadThenLoad), // @[HellaCache.scala:278:43]
.io_mem_perf_storeBufferEmptyAfterLoad (_dcache_io_cpu_perf_storeBufferEmptyAfterLoad), // @[HellaCache.scala:278:43]
.io_mem_perf_storeBufferEmptyAfterStore (_dcache_io_cpu_perf_storeBufferEmptyAfterStore), // @[HellaCache.scala:278:43]
.io_mem_keep_clock_enabled (_dcacheArb_io_mem_keep_clock_enabled)
); // @[HellaCache.scala:292:25]
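  // Descriptive note on the instance above: HellaCacheArbiter time-multiplexes two
  // requestors onto the single data-cache CPU port: the core (requestor 0) and the
  // scratchpad adapter (requestor 1); responses, nacks, exceptions, and perf
  // signals from the cache fan back out to both requestors.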
PTW ptw ( // @[PTW.scala:802:19]
.clock (clock),
.reset (reset),
.io_requestor_0_req_ready (_ptw_io_requestor_0_req_ready),
.io_requestor_0_req_bits_bits_addr (_dcache_io_ptw_req_bits_bits_addr), // @[HellaCache.scala:278:43]
.io_requestor_0_req_bits_bits_need_gpa (_dcache_io_ptw_req_bits_bits_need_gpa), // @[HellaCache.scala:278:43]
.io_requestor_0_req_bits_bits_vstage1 (_dcache_io_ptw_req_bits_bits_vstage1), // @[HellaCache.scala:278:43]
.io_requestor_0_req_bits_bits_stage2 (_dcache_io_ptw_req_bits_bits_stage2), // @[HellaCache.scala:278:43]
.io_requestor_0_resp_valid (_ptw_io_requestor_0_resp_valid),
.io_requestor_0_resp_bits_ae_ptw (_ptw_io_requestor_0_resp_bits_ae_ptw),
.io_requestor_0_resp_bits_ae_final (_ptw_io_requestor_0_resp_bits_ae_final),
.io_requestor_0_resp_bits_pf (_ptw_io_requestor_0_resp_bits_pf),
.io_requestor_0_resp_bits_gf (_ptw_io_requestor_0_resp_bits_gf),
.io_requestor_0_resp_bits_hr (_ptw_io_requestor_0_resp_bits_hr),
.io_requestor_0_resp_bits_hw (_ptw_io_requestor_0_resp_bits_hw),
.io_requestor_0_resp_bits_hx (_ptw_io_requestor_0_resp_bits_hx),
.io_requestor_0_resp_bits_pte_reserved_for_future (_ptw_io_requestor_0_resp_bits_pte_reserved_for_future),
.io_requestor_0_resp_bits_pte_ppn (_ptw_io_requestor_0_resp_bits_pte_ppn),
.io_requestor_0_resp_bits_pte_reserved_for_software (_ptw_io_requestor_0_resp_bits_pte_reserved_for_software),
.io_requestor_0_resp_bits_pte_d (_ptw_io_requestor_0_resp_bits_pte_d),
.io_requestor_0_resp_bits_pte_a (_ptw_io_requestor_0_resp_bits_pte_a),
.io_requestor_0_resp_bits_pte_g (_ptw_io_requestor_0_resp_bits_pte_g),
.io_requestor_0_resp_bits_pte_u (_ptw_io_requestor_0_resp_bits_pte_u),
.io_requestor_0_resp_bits_pte_x (_ptw_io_requestor_0_resp_bits_pte_x),
.io_requestor_0_resp_bits_pte_w (_ptw_io_requestor_0_resp_bits_pte_w),
.io_requestor_0_resp_bits_pte_r (_ptw_io_requestor_0_resp_bits_pte_r),
.io_requestor_0_resp_bits_pte_v (_ptw_io_requestor_0_resp_bits_pte_v),
.io_requestor_0_resp_bits_level (_ptw_io_requestor_0_resp_bits_level),
.io_requestor_0_resp_bits_homogeneous (_ptw_io_requestor_0_resp_bits_homogeneous),
.io_requestor_0_resp_bits_gpa_valid (_ptw_io_requestor_0_resp_bits_gpa_valid),
.io_requestor_0_resp_bits_gpa_bits (_ptw_io_requestor_0_resp_bits_gpa_bits),
.io_requestor_0_resp_bits_gpa_is_pte (_ptw_io_requestor_0_resp_bits_gpa_is_pte),
.io_requestor_0_status_debug (_ptw_io_requestor_0_status_debug),
.io_requestor_0_status_cease (_ptw_io_requestor_0_status_cease),
.io_requestor_0_status_wfi (_ptw_io_requestor_0_status_wfi),
.io_requestor_0_status_isa (_ptw_io_requestor_0_status_isa),
.io_requestor_0_status_dv (_ptw_io_requestor_0_status_dv),
.io_requestor_0_status_v (_ptw_io_requestor_0_status_v),
.io_requestor_0_status_sd (_ptw_io_requestor_0_status_sd),
.io_requestor_0_status_mpv (_ptw_io_requestor_0_status_mpv),
.io_requestor_0_status_gva (_ptw_io_requestor_0_status_gva),
.io_requestor_0_status_fs (_ptw_io_requestor_0_status_fs),
.io_requestor_0_status_mpp (_ptw_io_requestor_0_status_mpp),
.io_requestor_0_status_mpie (_ptw_io_requestor_0_status_mpie),
.io_requestor_0_status_mie (_ptw_io_requestor_0_status_mie),
.io_requestor_0_gstatus_debug (_ptw_io_requestor_0_gstatus_debug),
.io_requestor_0_gstatus_cease (_ptw_io_requestor_0_gstatus_cease),
.io_requestor_0_gstatus_wfi (_ptw_io_requestor_0_gstatus_wfi),
.io_requestor_0_gstatus_isa (_ptw_io_requestor_0_gstatus_isa),
.io_requestor_0_gstatus_dprv (_ptw_io_requestor_0_gstatus_dprv),
.io_requestor_0_gstatus_dv (_ptw_io_requestor_0_gstatus_dv),
.io_requestor_0_gstatus_prv (_ptw_io_requestor_0_gstatus_prv),
.io_requestor_0_gstatus_v (_ptw_io_requestor_0_gstatus_v),
.io_requestor_0_gstatus_sd (_ptw_io_requestor_0_gstatus_sd),
.io_requestor_0_gstatus_zero2 (_ptw_io_requestor_0_gstatus_zero2),
.io_requestor_0_gstatus_mpv (_ptw_io_requestor_0_gstatus_mpv),
.io_requestor_0_gstatus_gva (_ptw_io_requestor_0_gstatus_gva),
.io_requestor_0_gstatus_mbe (_ptw_io_requestor_0_gstatus_mbe),
.io_requestor_0_gstatus_sbe (_ptw_io_requestor_0_gstatus_sbe),
.io_requestor_0_gstatus_sxl (_ptw_io_requestor_0_gstatus_sxl),
.io_requestor_0_gstatus_zero1 (_ptw_io_requestor_0_gstatus_zero1),
.io_requestor_0_gstatus_tsr (_ptw_io_requestor_0_gstatus_tsr),
.io_requestor_0_gstatus_tw (_ptw_io_requestor_0_gstatus_tw),
.io_requestor_0_gstatus_tvm (_ptw_io_requestor_0_gstatus_tvm),
.io_requestor_0_gstatus_mxr (_ptw_io_requestor_0_gstatus_mxr),
.io_requestor_0_gstatus_sum (_ptw_io_requestor_0_gstatus_sum),
.io_requestor_0_gstatus_mprv (_ptw_io_requestor_0_gstatus_mprv),
.io_requestor_0_gstatus_fs (_ptw_io_requestor_0_gstatus_fs),
.io_requestor_0_gstatus_mpp (_ptw_io_requestor_0_gstatus_mpp),
.io_requestor_0_gstatus_vs (_ptw_io_requestor_0_gstatus_vs),
.io_requestor_0_gstatus_spp (_ptw_io_requestor_0_gstatus_spp),
.io_requestor_0_gstatus_mpie (_ptw_io_requestor_0_gstatus_mpie),
.io_requestor_0_gstatus_ube (_ptw_io_requestor_0_gstatus_ube),
.io_requestor_0_gstatus_spie (_ptw_io_requestor_0_gstatus_spie),
.io_requestor_0_gstatus_upie (_ptw_io_requestor_0_gstatus_upie),
.io_requestor_0_gstatus_mie (_ptw_io_requestor_0_gstatus_mie),
.io_requestor_0_gstatus_hie (_ptw_io_requestor_0_gstatus_hie),
.io_requestor_0_gstatus_sie (_ptw_io_requestor_0_gstatus_sie),
.io_requestor_0_gstatus_uie (_ptw_io_requestor_0_gstatus_uie),
.io_requestor_0_pmp_0_cfg_l (_ptw_io_requestor_0_pmp_0_cfg_l),
.io_requestor_0_pmp_0_cfg_a (_ptw_io_requestor_0_pmp_0_cfg_a),
.io_requestor_0_pmp_0_cfg_x (_ptw_io_requestor_0_pmp_0_cfg_x),
.io_requestor_0_pmp_0_cfg_w (_ptw_io_requestor_0_pmp_0_cfg_w),
.io_requestor_0_pmp_0_cfg_r (_ptw_io_requestor_0_pmp_0_cfg_r),
.io_requestor_0_pmp_0_addr (_ptw_io_requestor_0_pmp_0_addr),
.io_requestor_0_pmp_0_mask (_ptw_io_requestor_0_pmp_0_mask),
.io_requestor_0_pmp_1_cfg_l (_ptw_io_requestor_0_pmp_1_cfg_l),
.io_requestor_0_pmp_1_cfg_a (_ptw_io_requestor_0_pmp_1_cfg_a),
.io_requestor_0_pmp_1_cfg_x (_ptw_io_requestor_0_pmp_1_cfg_x),
.io_requestor_0_pmp_1_cfg_w (_ptw_io_requestor_0_pmp_1_cfg_w),
.io_requestor_0_pmp_1_cfg_r (_ptw_io_requestor_0_pmp_1_cfg_r),
.io_requestor_0_pmp_1_addr (_ptw_io_requestor_0_pmp_1_addr),
.io_requestor_0_pmp_1_mask (_ptw_io_requestor_0_pmp_1_mask),
.io_requestor_0_pmp_2_cfg_l (_ptw_io_requestor_0_pmp_2_cfg_l),
.io_requestor_0_pmp_2_cfg_a (_ptw_io_requestor_0_pmp_2_cfg_a),
.io_requestor_0_pmp_2_cfg_x (_ptw_io_requestor_0_pmp_2_cfg_x),
.io_requestor_0_pmp_2_cfg_w (_ptw_io_requestor_0_pmp_2_cfg_w),
.io_requestor_0_pmp_2_cfg_r (_ptw_io_requestor_0_pmp_2_cfg_r),
.io_requestor_0_pmp_2_addr (_ptw_io_requestor_0_pmp_2_addr),
.io_requestor_0_pmp_2_mask (_ptw_io_requestor_0_pmp_2_mask),
.io_requestor_0_pmp_3_cfg_l (_ptw_io_requestor_0_pmp_3_cfg_l),
.io_requestor_0_pmp_3_cfg_a (_ptw_io_requestor_0_pmp_3_cfg_a),
.io_requestor_0_pmp_3_cfg_x (_ptw_io_requestor_0_pmp_3_cfg_x),
.io_requestor_0_pmp_3_cfg_w (_ptw_io_requestor_0_pmp_3_cfg_w),
.io_requestor_0_pmp_3_cfg_r (_ptw_io_requestor_0_pmp_3_cfg_r),
.io_requestor_0_pmp_3_addr (_ptw_io_requestor_0_pmp_3_addr),
.io_requestor_0_pmp_3_mask (_ptw_io_requestor_0_pmp_3_mask),
.io_requestor_0_pmp_4_cfg_l (_ptw_io_requestor_0_pmp_4_cfg_l),
.io_requestor_0_pmp_4_cfg_a (_ptw_io_requestor_0_pmp_4_cfg_a),
.io_requestor_0_pmp_4_cfg_x (_ptw_io_requestor_0_pmp_4_cfg_x),
.io_requestor_0_pmp_4_cfg_w (_ptw_io_requestor_0_pmp_4_cfg_w),
.io_requestor_0_pmp_4_cfg_r (_ptw_io_requestor_0_pmp_4_cfg_r),
.io_requestor_0_pmp_4_addr (_ptw_io_requestor_0_pmp_4_addr),
.io_requestor_0_pmp_4_mask (_ptw_io_requestor_0_pmp_4_mask),
.io_requestor_0_pmp_5_cfg_l (_ptw_io_requestor_0_pmp_5_cfg_l),
.io_requestor_0_pmp_5_cfg_a (_ptw_io_requestor_0_pmp_5_cfg_a),
.io_requestor_0_pmp_5_cfg_x (_ptw_io_requestor_0_pmp_5_cfg_x),
.io_requestor_0_pmp_5_cfg_w (_ptw_io_requestor_0_pmp_5_cfg_w),
.io_requestor_0_pmp_5_cfg_r (_ptw_io_requestor_0_pmp_5_cfg_r),
.io_requestor_0_pmp_5_addr (_ptw_io_requestor_0_pmp_5_addr),
.io_requestor_0_pmp_5_mask (_ptw_io_requestor_0_pmp_5_mask),
.io_requestor_0_pmp_6_cfg_l (_ptw_io_requestor_0_pmp_6_cfg_l),
.io_requestor_0_pmp_6_cfg_a (_ptw_io_requestor_0_pmp_6_cfg_a),
.io_requestor_0_pmp_6_cfg_x (_ptw_io_requestor_0_pmp_6_cfg_x),
.io_requestor_0_pmp_6_cfg_w (_ptw_io_requestor_0_pmp_6_cfg_w),
.io_requestor_0_pmp_6_cfg_r (_ptw_io_requestor_0_pmp_6_cfg_r),
.io_requestor_0_pmp_6_addr (_ptw_io_requestor_0_pmp_6_addr),
.io_requestor_0_pmp_6_mask (_ptw_io_requestor_0_pmp_6_mask),
.io_requestor_0_pmp_7_cfg_l (_ptw_io_requestor_0_pmp_7_cfg_l),
.io_requestor_0_pmp_7_cfg_a (_ptw_io_requestor_0_pmp_7_cfg_a),
.io_requestor_0_pmp_7_cfg_x (_ptw_io_requestor_0_pmp_7_cfg_x),
.io_requestor_0_pmp_7_cfg_w (_ptw_io_requestor_0_pmp_7_cfg_w),
.io_requestor_0_pmp_7_cfg_r (_ptw_io_requestor_0_pmp_7_cfg_r),
.io_requestor_0_pmp_7_addr (_ptw_io_requestor_0_pmp_7_addr),
.io_requestor_0_pmp_7_mask (_ptw_io_requestor_0_pmp_7_mask),
.io_requestor_0_customCSRs_csrs_0_ren (_ptw_io_requestor_0_customCSRs_csrs_0_ren),
.io_requestor_0_customCSRs_csrs_0_wen (_ptw_io_requestor_0_customCSRs_csrs_0_wen),
.io_requestor_0_customCSRs_csrs_0_wdata (_ptw_io_requestor_0_customCSRs_csrs_0_wdata),
.io_requestor_0_customCSRs_csrs_0_value (_ptw_io_requestor_0_customCSRs_csrs_0_value),
.io_requestor_0_customCSRs_csrs_1_ren (_ptw_io_requestor_0_customCSRs_csrs_1_ren),
.io_requestor_0_customCSRs_csrs_1_wen (_ptw_io_requestor_0_customCSRs_csrs_1_wen),
.io_requestor_0_customCSRs_csrs_1_wdata (_ptw_io_requestor_0_customCSRs_csrs_1_wdata),
.io_requestor_0_customCSRs_csrs_1_value (_ptw_io_requestor_0_customCSRs_csrs_1_value),
.io_requestor_0_customCSRs_csrs_2_ren (_ptw_io_requestor_0_customCSRs_csrs_2_ren),
.io_requestor_0_customCSRs_csrs_2_wen (_ptw_io_requestor_0_customCSRs_csrs_2_wen),
.io_requestor_0_customCSRs_csrs_2_wdata (_ptw_io_requestor_0_customCSRs_csrs_2_wdata),
.io_requestor_0_customCSRs_csrs_2_value (_ptw_io_requestor_0_customCSRs_csrs_2_value),
.io_requestor_0_customCSRs_csrs_3_ren (_ptw_io_requestor_0_customCSRs_csrs_3_ren),
.io_requestor_0_customCSRs_csrs_3_wen (_ptw_io_requestor_0_customCSRs_csrs_3_wen),
.io_requestor_0_customCSRs_csrs_3_wdata (_ptw_io_requestor_0_customCSRs_csrs_3_wdata),
.io_requestor_0_customCSRs_csrs_3_value (_ptw_io_requestor_0_customCSRs_csrs_3_value),
.io_requestor_1_req_ready (_ptw_io_requestor_1_req_ready),
.io_requestor_1_req_bits_valid (_frontend_io_ptw_req_bits_valid), // @[Frontend.scala:393:28]
.io_requestor_1_req_bits_bits_addr (_frontend_io_ptw_req_bits_bits_addr), // @[Frontend.scala:393:28]
.io_requestor_1_req_bits_bits_need_gpa (_frontend_io_ptw_req_bits_bits_need_gpa), // @[Frontend.scala:393:28]
.io_requestor_1_req_bits_bits_vstage1 (_frontend_io_ptw_req_bits_bits_vstage1), // @[Frontend.scala:393:28]
.io_requestor_1_req_bits_bits_stage2 (_frontend_io_ptw_req_bits_bits_stage2), // @[Frontend.scala:393:28]
.io_requestor_1_resp_valid (_ptw_io_requestor_1_resp_valid),
.io_requestor_1_resp_bits_ae_ptw (_ptw_io_requestor_1_resp_bits_ae_ptw),
.io_requestor_1_resp_bits_ae_final (_ptw_io_requestor_1_resp_bits_ae_final),
.io_requestor_1_resp_bits_pf (_ptw_io_requestor_1_resp_bits_pf),
.io_requestor_1_resp_bits_gf (_ptw_io_requestor_1_resp_bits_gf),
.io_requestor_1_resp_bits_hr (_ptw_io_requestor_1_resp_bits_hr),
.io_requestor_1_resp_bits_hw (_ptw_io_requestor_1_resp_bits_hw),
.io_requestor_1_resp_bits_hx (_ptw_io_requestor_1_resp_bits_hx),
.io_requestor_1_resp_bits_pte_reserved_for_future (_ptw_io_requestor_1_resp_bits_pte_reserved_for_future),
.io_requestor_1_resp_bits_pte_ppn (_ptw_io_requestor_1_resp_bits_pte_ppn),
.io_requestor_1_resp_bits_pte_reserved_for_software (_ptw_io_requestor_1_resp_bits_pte_reserved_for_software),
.io_requestor_1_resp_bits_pte_d (_ptw_io_requestor_1_resp_bits_pte_d),
.io_requestor_1_resp_bits_pte_a (_ptw_io_requestor_1_resp_bits_pte_a),
.io_requestor_1_resp_bits_pte_g (_ptw_io_requestor_1_resp_bits_pte_g),
.io_requestor_1_resp_bits_pte_u (_ptw_io_requestor_1_resp_bits_pte_u),
.io_requestor_1_resp_bits_pte_x (_ptw_io_requestor_1_resp_bits_pte_x),
.io_requestor_1_resp_bits_pte_w (_ptw_io_requestor_1_resp_bits_pte_w),
.io_requestor_1_resp_bits_pte_r (_ptw_io_requestor_1_resp_bits_pte_r),
.io_requestor_1_resp_bits_pte_v (_ptw_io_requestor_1_resp_bits_pte_v),
.io_requestor_1_resp_bits_level (_ptw_io_requestor_1_resp_bits_level),
.io_requestor_1_resp_bits_homogeneous (_ptw_io_requestor_1_resp_bits_homogeneous),
.io_requestor_1_resp_bits_gpa_valid (_ptw_io_requestor_1_resp_bits_gpa_valid),
.io_requestor_1_resp_bits_gpa_bits (_ptw_io_requestor_1_resp_bits_gpa_bits),
.io_requestor_1_resp_bits_gpa_is_pte (_ptw_io_requestor_1_resp_bits_gpa_is_pte),
.io_requestor_1_status_debug (_ptw_io_requestor_1_status_debug),
.io_requestor_1_status_cease (_ptw_io_requestor_1_status_cease),
.io_requestor_1_status_wfi (_ptw_io_requestor_1_status_wfi),
.io_requestor_1_status_isa (_ptw_io_requestor_1_status_isa),
.io_requestor_1_status_dv (_ptw_io_requestor_1_status_dv),
.io_requestor_1_status_v (_ptw_io_requestor_1_status_v),
.io_requestor_1_status_sd (_ptw_io_requestor_1_status_sd),
.io_requestor_1_status_mpv (_ptw_io_requestor_1_status_mpv),
.io_requestor_1_status_gva (_ptw_io_requestor_1_status_gva),
.io_requestor_1_status_fs (_ptw_io_requestor_1_status_fs),
.io_requestor_1_status_mpp (_ptw_io_requestor_1_status_mpp),
.io_requestor_1_status_mpie (_ptw_io_requestor_1_status_mpie),
.io_requestor_1_status_mie (_ptw_io_requestor_1_status_mie),
.io_requestor_1_gstatus_debug (_ptw_io_requestor_1_gstatus_debug),
.io_requestor_1_gstatus_cease (_ptw_io_requestor_1_gstatus_cease),
.io_requestor_1_gstatus_wfi (_ptw_io_requestor_1_gstatus_wfi),
.io_requestor_1_gstatus_isa (_ptw_io_requestor_1_gstatus_isa),
.io_requestor_1_gstatus_dprv (_ptw_io_requestor_1_gstatus_dprv),
.io_requestor_1_gstatus_dv (_ptw_io_requestor_1_gstatus_dv),
.io_requestor_1_gstatus_prv (_ptw_io_requestor_1_gstatus_prv),
.io_requestor_1_gstatus_v (_ptw_io_requestor_1_gstatus_v),
.io_requestor_1_gstatus_sd (_ptw_io_requestor_1_gstatus_sd),
.io_requestor_1_gstatus_zero2 (_ptw_io_requestor_1_gstatus_zero2),
.io_requestor_1_gstatus_mpv (_ptw_io_requestor_1_gstatus_mpv),
.io_requestor_1_gstatus_gva (_ptw_io_requestor_1_gstatus_gva),
.io_requestor_1_gstatus_mbe (_ptw_io_requestor_1_gstatus_mbe),
.io_requestor_1_gstatus_sbe (_ptw_io_requestor_1_gstatus_sbe),
.io_requestor_1_gstatus_sxl (_ptw_io_requestor_1_gstatus_sxl),
.io_requestor_1_gstatus_zero1 (_ptw_io_requestor_1_gstatus_zero1),
.io_requestor_1_gstatus_tsr (_ptw_io_requestor_1_gstatus_tsr),
.io_requestor_1_gstatus_tw (_ptw_io_requestor_1_gstatus_tw),
.io_requestor_1_gstatus_tvm (_ptw_io_requestor_1_gstatus_tvm),
.io_requestor_1_gstatus_mxr (_ptw_io_requestor_1_gstatus_mxr),
.io_requestor_1_gstatus_sum (_ptw_io_requestor_1_gstatus_sum),
.io_requestor_1_gstatus_mprv (_ptw_io_requestor_1_gstatus_mprv),
.io_requestor_1_gstatus_fs (_ptw_io_requestor_1_gstatus_fs),
.io_requestor_1_gstatus_mpp (_ptw_io_requestor_1_gstatus_mpp),
.io_requestor_1_gstatus_vs (_ptw_io_requestor_1_gstatus_vs),
.io_requestor_1_gstatus_spp (_ptw_io_requestor_1_gstatus_spp),
.io_requestor_1_gstatus_mpie (_ptw_io_requestor_1_gstatus_mpie),
.io_requestor_1_gstatus_ube (_ptw_io_requestor_1_gstatus_ube),
.io_requestor_1_gstatus_spie (_ptw_io_requestor_1_gstatus_spie),
.io_requestor_1_gstatus_upie (_ptw_io_requestor_1_gstatus_upie),
.io_requestor_1_gstatus_mie (_ptw_io_requestor_1_gstatus_mie),
.io_requestor_1_gstatus_hie (_ptw_io_requestor_1_gstatus_hie),
.io_requestor_1_gstatus_sie (_ptw_io_requestor_1_gstatus_sie),
.io_requestor_1_gstatus_uie (_ptw_io_requestor_1_gstatus_uie),
.io_requestor_1_pmp_0_cfg_l (_ptw_io_requestor_1_pmp_0_cfg_l),
.io_requestor_1_pmp_0_cfg_a (_ptw_io_requestor_1_pmp_0_cfg_a),
.io_requestor_1_pmp_0_cfg_x (_ptw_io_requestor_1_pmp_0_cfg_x),
.io_requestor_1_pmp_0_cfg_w (_ptw_io_requestor_1_pmp_0_cfg_w),
.io_requestor_1_pmp_0_cfg_r (_ptw_io_requestor_1_pmp_0_cfg_r),
.io_requestor_1_pmp_0_addr (_ptw_io_requestor_1_pmp_0_addr),
.io_requestor_1_pmp_0_mask (_ptw_io_requestor_1_pmp_0_mask),
.io_requestor_1_pmp_1_cfg_l (_ptw_io_requestor_1_pmp_1_cfg_l),
.io_requestor_1_pmp_1_cfg_a (_ptw_io_requestor_1_pmp_1_cfg_a),
.io_requestor_1_pmp_1_cfg_x (_ptw_io_requestor_1_pmp_1_cfg_x),
.io_requestor_1_pmp_1_cfg_w (_ptw_io_requestor_1_pmp_1_cfg_w),
.io_requestor_1_pmp_1_cfg_r (_ptw_io_requestor_1_pmp_1_cfg_r),
.io_requestor_1_pmp_1_addr (_ptw_io_requestor_1_pmp_1_addr),
.io_requestor_1_pmp_1_mask (_ptw_io_requestor_1_pmp_1_mask),
.io_requestor_1_pmp_2_cfg_l (_ptw_io_requestor_1_pmp_2_cfg_l),
.io_requestor_1_pmp_2_cfg_a (_ptw_io_requestor_1_pmp_2_cfg_a),
.io_requestor_1_pmp_2_cfg_x (_ptw_io_requestor_1_pmp_2_cfg_x),
.io_requestor_1_pmp_2_cfg_w (_ptw_io_requestor_1_pmp_2_cfg_w),
.io_requestor_1_pmp_2_cfg_r (_ptw_io_requestor_1_pmp_2_cfg_r),
.io_requestor_1_pmp_2_addr (_ptw_io_requestor_1_pmp_2_addr),
.io_requestor_1_pmp_2_mask (_ptw_io_requestor_1_pmp_2_mask),
.io_requestor_1_pmp_3_cfg_l (_ptw_io_requestor_1_pmp_3_cfg_l),
.io_requestor_1_pmp_3_cfg_a (_ptw_io_requestor_1_pmp_3_cfg_a),
.io_requestor_1_pmp_3_cfg_x (_ptw_io_requestor_1_pmp_3_cfg_x),
.io_requestor_1_pmp_3_cfg_w (_ptw_io_requestor_1_pmp_3_cfg_w),
.io_requestor_1_pmp_3_cfg_r (_ptw_io_requestor_1_pmp_3_cfg_r),
.io_requestor_1_pmp_3_addr (_ptw_io_requestor_1_pmp_3_addr),
.io_requestor_1_pmp_3_mask (_ptw_io_requestor_1_pmp_3_mask),
.io_requestor_1_pmp_4_cfg_l (_ptw_io_requestor_1_pmp_4_cfg_l),
.io_requestor_1_pmp_4_cfg_a (_ptw_io_requestor_1_pmp_4_cfg_a),
.io_requestor_1_pmp_4_cfg_x (_ptw_io_requestor_1_pmp_4_cfg_x),
.io_requestor_1_pmp_4_cfg_w (_ptw_io_requestor_1_pmp_4_cfg_w),
.io_requestor_1_pmp_4_cfg_r (_ptw_io_requestor_1_pmp_4_cfg_r),
.io_requestor_1_pmp_4_addr (_ptw_io_requestor_1_pmp_4_addr),
.io_requestor_1_pmp_4_mask (_ptw_io_requestor_1_pmp_4_mask),
.io_requestor_1_pmp_5_cfg_l (_ptw_io_requestor_1_pmp_5_cfg_l),
.io_requestor_1_pmp_5_cfg_a (_ptw_io_requestor_1_pmp_5_cfg_a),
.io_requestor_1_pmp_5_cfg_x (_ptw_io_requestor_1_pmp_5_cfg_x),
.io_requestor_1_pmp_5_cfg_w (_ptw_io_requestor_1_pmp_5_cfg_w),
.io_requestor_1_pmp_5_cfg_r (_ptw_io_requestor_1_pmp_5_cfg_r),
.io_requestor_1_pmp_5_addr (_ptw_io_requestor_1_pmp_5_addr),
.io_requestor_1_pmp_5_mask (_ptw_io_requestor_1_pmp_5_mask),
.io_requestor_1_pmp_6_cfg_l (_ptw_io_requestor_1_pmp_6_cfg_l),
.io_requestor_1_pmp_6_cfg_a (_ptw_io_requestor_1_pmp_6_cfg_a),
.io_requestor_1_pmp_6_cfg_x (_ptw_io_requestor_1_pmp_6_cfg_x),
.io_requestor_1_pmp_6_cfg_w (_ptw_io_requestor_1_pmp_6_cfg_w),
.io_requestor_1_pmp_6_cfg_r (_ptw_io_requestor_1_pmp_6_cfg_r),
.io_requestor_1_pmp_6_addr (_ptw_io_requestor_1_pmp_6_addr),
.io_requestor_1_pmp_6_mask (_ptw_io_requestor_1_pmp_6_mask),
.io_requestor_1_pmp_7_cfg_l (_ptw_io_requestor_1_pmp_7_cfg_l),
.io_requestor_1_pmp_7_cfg_a (_ptw_io_requestor_1_pmp_7_cfg_a),
.io_requestor_1_pmp_7_cfg_x (_ptw_io_requestor_1_pmp_7_cfg_x),
.io_requestor_1_pmp_7_cfg_w (_ptw_io_requestor_1_pmp_7_cfg_w),
.io_requestor_1_pmp_7_cfg_r (_ptw_io_requestor_1_pmp_7_cfg_r),
.io_requestor_1_pmp_7_addr (_ptw_io_requestor_1_pmp_7_addr),
.io_requestor_1_pmp_7_mask (_ptw_io_requestor_1_pmp_7_mask),
.io_requestor_1_customCSRs_csrs_0_ren (_ptw_io_requestor_1_customCSRs_csrs_0_ren),
.io_requestor_1_customCSRs_csrs_0_wen (_ptw_io_requestor_1_customCSRs_csrs_0_wen),
.io_requestor_1_customCSRs_csrs_0_wdata (_ptw_io_requestor_1_customCSRs_csrs_0_wdata),
.io_requestor_1_customCSRs_csrs_0_value (_ptw_io_requestor_1_customCSRs_csrs_0_value),
.io_requestor_1_customCSRs_csrs_1_ren (_ptw_io_requestor_1_customCSRs_csrs_1_ren),
.io_requestor_1_customCSRs_csrs_1_wen (_ptw_io_requestor_1_customCSRs_csrs_1_wen),
.io_requestor_1_customCSRs_csrs_1_wdata (_ptw_io_requestor_1_customCSRs_csrs_1_wdata),
.io_requestor_1_customCSRs_csrs_1_value (_ptw_io_requestor_1_customCSRs_csrs_1_value),
.io_requestor_1_customCSRs_csrs_2_ren (_ptw_io_requestor_1_customCSRs_csrs_2_ren),
.io_requestor_1_customCSRs_csrs_2_wen (_ptw_io_requestor_1_customCSRs_csrs_2_wen),
.io_requestor_1_customCSRs_csrs_2_wdata (_ptw_io_requestor_1_customCSRs_csrs_2_wdata),
.io_requestor_1_customCSRs_csrs_2_value (_ptw_io_requestor_1_customCSRs_csrs_2_value),
.io_requestor_1_customCSRs_csrs_3_ren (_ptw_io_requestor_1_customCSRs_csrs_3_ren),
.io_requestor_1_customCSRs_csrs_3_wen (_ptw_io_requestor_1_customCSRs_csrs_3_wen),
.io_requestor_1_customCSRs_csrs_3_wdata (_ptw_io_requestor_1_customCSRs_csrs_3_wdata),
.io_requestor_1_customCSRs_csrs_3_value (_ptw_io_requestor_1_customCSRs_csrs_3_value),
.io_dpath_sfence_valid (_core_io_ptw_sfence_valid), // @[RocketTile.scala:147:20]
.io_dpath_sfence_bits_rs1 (_core_io_ptw_sfence_bits_rs1), // @[RocketTile.scala:147:20]
.io_dpath_sfence_bits_rs2 (_core_io_ptw_sfence_bits_rs2), // @[RocketTile.scala:147:20]
.io_dpath_sfence_bits_addr (_core_io_ptw_sfence_bits_addr), // @[RocketTile.scala:147:20]
.io_dpath_sfence_bits_asid (_core_io_ptw_sfence_bits_asid), // @[RocketTile.scala:147:20]
.io_dpath_sfence_bits_hv (_core_io_ptw_sfence_bits_hv), // @[RocketTile.scala:147:20]
.io_dpath_sfence_bits_hg (_core_io_ptw_sfence_bits_hg), // @[RocketTile.scala:147:20]
.io_dpath_status_debug (_core_io_ptw_status_debug), // @[RocketTile.scala:147:20]
.io_dpath_status_cease (_core_io_ptw_status_cease), // @[RocketTile.scala:147:20]
.io_dpath_status_wfi (_core_io_ptw_status_wfi), // @[RocketTile.scala:147:20]
.io_dpath_status_isa (_core_io_ptw_status_isa), // @[RocketTile.scala:147:20]
.io_dpath_status_dv (_core_io_ptw_status_dv), // @[RocketTile.scala:147:20]
.io_dpath_status_v (_core_io_ptw_status_v), // @[RocketTile.scala:147:20]
.io_dpath_status_sd (_core_io_ptw_status_sd), // @[RocketTile.scala:147:20]
.io_dpath_status_mpv (_core_io_ptw_status_mpv), // @[RocketTile.scala:147:20]
.io_dpath_status_gva (_core_io_ptw_status_gva), // @[RocketTile.scala:147:20]
.io_dpath_status_fs (_core_io_ptw_status_fs), // @[RocketTile.scala:147:20]
.io_dpath_status_mpp (_core_io_ptw_status_mpp), // @[RocketTile.scala:147:20]
.io_dpath_status_mpie (_core_io_ptw_status_mpie), // @[RocketTile.scala:147:20]
.io_dpath_status_mie (_core_io_ptw_status_mie), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_debug (_core_io_ptw_gstatus_debug), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_cease (_core_io_ptw_gstatus_cease), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_wfi (_core_io_ptw_gstatus_wfi), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_isa (_core_io_ptw_gstatus_isa), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_dprv (_core_io_ptw_gstatus_dprv), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_dv (_core_io_ptw_gstatus_dv), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_prv (_core_io_ptw_gstatus_prv), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_v (_core_io_ptw_gstatus_v), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_sd (_core_io_ptw_gstatus_sd), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_zero2 (_core_io_ptw_gstatus_zero2), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_mpv (_core_io_ptw_gstatus_mpv), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_gva (_core_io_ptw_gstatus_gva), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_mbe (_core_io_ptw_gstatus_mbe), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_sbe (_core_io_ptw_gstatus_sbe), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_sxl (_core_io_ptw_gstatus_sxl), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_zero1 (_core_io_ptw_gstatus_zero1), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_tsr (_core_io_ptw_gstatus_tsr), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_tw (_core_io_ptw_gstatus_tw), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_tvm (_core_io_ptw_gstatus_tvm), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_mxr (_core_io_ptw_gstatus_mxr), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_sum (_core_io_ptw_gstatus_sum), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_mprv (_core_io_ptw_gstatus_mprv), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_fs (_core_io_ptw_gstatus_fs), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_mpp (_core_io_ptw_gstatus_mpp), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_vs (_core_io_ptw_gstatus_vs), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_spp (_core_io_ptw_gstatus_spp), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_mpie (_core_io_ptw_gstatus_mpie), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_ube (_core_io_ptw_gstatus_ube), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_spie (_core_io_ptw_gstatus_spie), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_upie (_core_io_ptw_gstatus_upie), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_mie (_core_io_ptw_gstatus_mie), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_hie (_core_io_ptw_gstatus_hie), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_sie (_core_io_ptw_gstatus_sie), // @[RocketTile.scala:147:20]
.io_dpath_gstatus_uie (_core_io_ptw_gstatus_uie), // @[RocketTile.scala:147:20]
.io_dpath_pmp_0_cfg_l (_core_io_ptw_pmp_0_cfg_l), // @[RocketTile.scala:147:20]
.io_dpath_pmp_0_cfg_a (_core_io_ptw_pmp_0_cfg_a), // @[RocketTile.scala:147:20]
.io_dpath_pmp_0_cfg_x (_core_io_ptw_pmp_0_cfg_x), // @[RocketTile.scala:147:20]
.io_dpath_pmp_0_cfg_w (_core_io_ptw_pmp_0_cfg_w), // @[RocketTile.scala:147:20]
.io_dpath_pmp_0_cfg_r (_core_io_ptw_pmp_0_cfg_r), // @[RocketTile.scala:147:20]
.io_dpath_pmp_0_addr (_core_io_ptw_pmp_0_addr), // @[RocketTile.scala:147:20]
.io_dpath_pmp_0_mask (_core_io_ptw_pmp_0_mask), // @[RocketTile.scala:147:20]
.io_dpath_pmp_1_cfg_l (_core_io_ptw_pmp_1_cfg_l), // @[RocketTile.scala:147:20]
.io_dpath_pmp_1_cfg_a (_core_io_ptw_pmp_1_cfg_a), // @[RocketTile.scala:147:20]
.io_dpath_pmp_1_cfg_x (_core_io_ptw_pmp_1_cfg_x), // @[RocketTile.scala:147:20]
.io_dpath_pmp_1_cfg_w (_core_io_ptw_pmp_1_cfg_w), // @[RocketTile.scala:147:20]
.io_dpath_pmp_1_cfg_r (_core_io_ptw_pmp_1_cfg_r), // @[RocketTile.scala:147:20]
.io_dpath_pmp_1_addr (_core_io_ptw_pmp_1_addr), // @[RocketTile.scala:147:20]
.io_dpath_pmp_1_mask (_core_io_ptw_pmp_1_mask), // @[RocketTile.scala:147:20]
.io_dpath_pmp_2_cfg_l (_core_io_ptw_pmp_2_cfg_l), // @[RocketTile.scala:147:20]
.io_dpath_pmp_2_cfg_a (_core_io_ptw_pmp_2_cfg_a), // @[RocketTile.scala:147:20]
.io_dpath_pmp_2_cfg_x (_core_io_ptw_pmp_2_cfg_x), // @[RocketTile.scala:147:20]
.io_dpath_pmp_2_cfg_w (_core_io_ptw_pmp_2_cfg_w), // @[RocketTile.scala:147:20]
.io_dpath_pmp_2_cfg_r (_core_io_ptw_pmp_2_cfg_r), // @[RocketTile.scala:147:20]
.io_dpath_pmp_2_addr (_core_io_ptw_pmp_2_addr), // @[RocketTile.scala:147:20]
.io_dpath_pmp_2_mask (_core_io_ptw_pmp_2_mask), // @[RocketTile.scala:147:20]
.io_dpath_pmp_3_cfg_l (_core_io_ptw_pmp_3_cfg_l), // @[RocketTile.scala:147:20]
.io_dpath_pmp_3_cfg_a (_core_io_ptw_pmp_3_cfg_a), // @[RocketTile.scala:147:20]
.io_dpath_pmp_3_cfg_x (_core_io_ptw_pmp_3_cfg_x), // @[RocketTile.scala:147:20]
.io_dpath_pmp_3_cfg_w (_core_io_ptw_pmp_3_cfg_w), // @[RocketTile.scala:147:20]
.io_dpath_pmp_3_cfg_r (_core_io_ptw_pmp_3_cfg_r), // @[RocketTile.scala:147:20]
.io_dpath_pmp_3_addr (_core_io_ptw_pmp_3_addr), // @[RocketTile.scala:147:20]
.io_dpath_pmp_3_mask (_core_io_ptw_pmp_3_mask), // @[RocketTile.scala:147:20]
.io_dpath_pmp_4_cfg_l (_core_io_ptw_pmp_4_cfg_l), // @[RocketTile.scala:147:20]
.io_dpath_pmp_4_cfg_a (_core_io_ptw_pmp_4_cfg_a), // @[RocketTile.scala:147:20]
.io_dpath_pmp_4_cfg_x (_core_io_ptw_pmp_4_cfg_x), // @[RocketTile.scala:147:20]
.io_dpath_pmp_4_cfg_w (_core_io_ptw_pmp_4_cfg_w), // @[RocketTile.scala:147:20]
.io_dpath_pmp_4_cfg_r (_core_io_ptw_pmp_4_cfg_r), // @[RocketTile.scala:147:20]
.io_dpath_pmp_4_addr (_core_io_ptw_pmp_4_addr), // @[RocketTile.scala:147:20]
.io_dpath_pmp_4_mask (_core_io_ptw_pmp_4_mask), // @[RocketTile.scala:147:20]
.io_dpath_pmp_5_cfg_l (_core_io_ptw_pmp_5_cfg_l), // @[RocketTile.scala:147:20]
.io_dpath_pmp_5_cfg_a (_core_io_ptw_pmp_5_cfg_a), // @[RocketTile.scala:147:20]
.io_dpath_pmp_5_cfg_x (_core_io_ptw_pmp_5_cfg_x), // @[RocketTile.scala:147:20]
.io_dpath_pmp_5_cfg_w (_core_io_ptw_pmp_5_cfg_w), // @[RocketTile.scala:147:20]
.io_dpath_pmp_5_cfg_r (_core_io_ptw_pmp_5_cfg_r), // @[RocketTile.scala:147:20]
.io_dpath_pmp_5_addr (_core_io_ptw_pmp_5_addr), // @[RocketTile.scala:147:20]
.io_dpath_pmp_5_mask (_core_io_ptw_pmp_5_mask), // @[RocketTile.scala:147:20]
.io_dpath_pmp_6_cfg_l (_core_io_ptw_pmp_6_cfg_l), // @[RocketTile.scala:147:20]
.io_dpath_pmp_6_cfg_a (_core_io_ptw_pmp_6_cfg_a), // @[RocketTile.scala:147:20]
.io_dpath_pmp_6_cfg_x (_core_io_ptw_pmp_6_cfg_x), // @[RocketTile.scala:147:20]
.io_dpath_pmp_6_cfg_w (_core_io_ptw_pmp_6_cfg_w), // @[RocketTile.scala:147:20]
.io_dpath_pmp_6_cfg_r (_core_io_ptw_pmp_6_cfg_r), // @[RocketTile.scala:147:20]
.io_dpath_pmp_6_addr (_core_io_ptw_pmp_6_addr), // @[RocketTile.scala:147:20]
.io_dpath_pmp_6_mask (_core_io_ptw_pmp_6_mask), // @[RocketTile.scala:147:20]
.io_dpath_pmp_7_cfg_l (_core_io_ptw_pmp_7_cfg_l), // @[RocketTile.scala:147:20]
.io_dpath_pmp_7_cfg_a (_core_io_ptw_pmp_7_cfg_a), // @[RocketTile.scala:147:20]
.io_dpath_pmp_7_cfg_x (_core_io_ptw_pmp_7_cfg_x), // @[RocketTile.scala:147:20]
.io_dpath_pmp_7_cfg_w (_core_io_ptw_pmp_7_cfg_w), // @[RocketTile.scala:147:20]
.io_dpath_pmp_7_cfg_r (_core_io_ptw_pmp_7_cfg_r), // @[RocketTile.scala:147:20]
.io_dpath_pmp_7_addr (_core_io_ptw_pmp_7_addr), // @[RocketTile.scala:147:20]
.io_dpath_pmp_7_mask (_core_io_ptw_pmp_7_mask), // @[RocketTile.scala:147:20]
.io_dpath_perf_pte_miss (_ptw_io_dpath_perf_pte_miss),
.io_dpath_perf_pte_hit (_ptw_io_dpath_perf_pte_hit),
.io_dpath_customCSRs_csrs_0_ren (_core_io_ptw_customCSRs_csrs_0_ren), // @[RocketTile.scala:147:20]
.io_dpath_customCSRs_csrs_0_wen (_core_io_ptw_customCSRs_csrs_0_wen), // @[RocketTile.scala:147:20]
.io_dpath_customCSRs_csrs_0_wdata (_core_io_ptw_customCSRs_csrs_0_wdata), // @[RocketTile.scala:147:20]
.io_dpath_customCSRs_csrs_0_value (_core_io_ptw_customCSRs_csrs_0_value), // @[RocketTile.scala:147:20]
.io_dpath_customCSRs_csrs_1_ren (_core_io_ptw_customCSRs_csrs_1_ren), // @[RocketTile.scala:147:20]
.io_dpath_customCSRs_csrs_1_wen (_core_io_ptw_customCSRs_csrs_1_wen), // @[RocketTile.scala:147:20]
.io_dpath_customCSRs_csrs_1_wdata (_core_io_ptw_customCSRs_csrs_1_wdata), // @[RocketTile.scala:147:20]
.io_dpath_customCSRs_csrs_1_value (_core_io_ptw_customCSRs_csrs_1_value), // @[RocketTile.scala:147:20]
.io_dpath_customCSRs_csrs_2_ren (_core_io_ptw_customCSRs_csrs_2_ren), // @[RocketTile.scala:147:20]
.io_dpath_customCSRs_csrs_2_wen (_core_io_ptw_customCSRs_csrs_2_wen), // @[RocketTile.scala:147:20]
.io_dpath_customCSRs_csrs_2_wdata (_core_io_ptw_customCSRs_csrs_2_wdata), // @[RocketTile.scala:147:20]
.io_dpath_customCSRs_csrs_2_value (_core_io_ptw_customCSRs_csrs_2_value), // @[RocketTile.scala:147:20]
.io_dpath_customCSRs_csrs_3_ren (_core_io_ptw_customCSRs_csrs_3_ren), // @[RocketTile.scala:147:20]
.io_dpath_customCSRs_csrs_3_wen (_core_io_ptw_customCSRs_csrs_3_wen), // @[RocketTile.scala:147:20]
.io_dpath_customCSRs_csrs_3_wdata (_core_io_ptw_customCSRs_csrs_3_wdata), // @[RocketTile.scala:147:20]
.io_dpath_customCSRs_csrs_3_value (_core_io_ptw_customCSRs_csrs_3_value) // @[RocketTile.scala:147:20]
); // @[PTW.scala:802:19]
Rocket core ( // @[RocketTile.scala:147:20]
.clock (clock),
.reset (reset),
.io_hartid (hartIdSinkNodeIn), // @[MixedNode.scala:551:17]
.io_interrupts_debug (intSinkNodeIn_0), // @[MixedNode.scala:551:17]
.io_interrupts_mtip (intSinkNodeIn_2), // @[MixedNode.scala:551:17]
.io_interrupts_msip (intSinkNodeIn_1), // @[MixedNode.scala:551:17]
.io_interrupts_meip (intSinkNodeIn_3), // @[MixedNode.scala:551:17]
.io_imem_might_request (_core_io_imem_might_request),
.io_imem_req_valid (_core_io_imem_req_valid),
.io_imem_req_bits_pc (_core_io_imem_req_bits_pc),
.io_imem_req_bits_speculative (_core_io_imem_req_bits_speculative),
.io_imem_sfence_valid (_core_io_imem_sfence_valid),
.io_imem_sfence_bits_rs1 (_core_io_imem_sfence_bits_rs1),
.io_imem_sfence_bits_rs2 (_core_io_imem_sfence_bits_rs2),
.io_imem_sfence_bits_addr (_core_io_imem_sfence_bits_addr),
.io_imem_sfence_bits_asid (_core_io_imem_sfence_bits_asid),
.io_imem_sfence_bits_hv (_core_io_imem_sfence_bits_hv),
.io_imem_sfence_bits_hg (_core_io_imem_sfence_bits_hg),
.io_imem_resp_ready (_core_io_imem_resp_ready),
.io_imem_resp_valid (_frontend_io_cpu_resp_valid), // @[Frontend.scala:393:28]
.io_imem_resp_bits_btb_cfiType (_frontend_io_cpu_resp_bits_btb_cfiType), // @[Frontend.scala:393:28]
.io_imem_resp_bits_btb_taken (_frontend_io_cpu_resp_bits_btb_taken), // @[Frontend.scala:393:28]
.io_imem_resp_bits_btb_mask (_frontend_io_cpu_resp_bits_btb_mask), // @[Frontend.scala:393:28]
.io_imem_resp_bits_btb_bridx (_frontend_io_cpu_resp_bits_btb_bridx), // @[Frontend.scala:393:28]
.io_imem_resp_bits_btb_target (_frontend_io_cpu_resp_bits_btb_target), // @[Frontend.scala:393:28]
.io_imem_resp_bits_btb_entry (_frontend_io_cpu_resp_bits_btb_entry), // @[Frontend.scala:393:28]
.io_imem_resp_bits_btb_bht_history (_frontend_io_cpu_resp_bits_btb_bht_history), // @[Frontend.scala:393:28]
.io_imem_resp_bits_btb_bht_value (_frontend_io_cpu_resp_bits_btb_bht_value), // @[Frontend.scala:393:28]
.io_imem_resp_bits_pc (_frontend_io_cpu_resp_bits_pc), // @[Frontend.scala:393:28]
.io_imem_resp_bits_data (_frontend_io_cpu_resp_bits_data), // @[Frontend.scala:393:28]
.io_imem_resp_bits_mask (_frontend_io_cpu_resp_bits_mask), // @[Frontend.scala:393:28]
.io_imem_resp_bits_xcpt_pf_inst (_frontend_io_cpu_resp_bits_xcpt_pf_inst), // @[Frontend.scala:393:28]
.io_imem_resp_bits_xcpt_gf_inst (_frontend_io_cpu_resp_bits_xcpt_gf_inst), // @[Frontend.scala:393:28]
.io_imem_resp_bits_xcpt_ae_inst (_frontend_io_cpu_resp_bits_xcpt_ae_inst), // @[Frontend.scala:393:28]
.io_imem_resp_bits_replay (_frontend_io_cpu_resp_bits_replay), // @[Frontend.scala:393:28]
.io_imem_gpa_valid (_frontend_io_cpu_gpa_valid), // @[Frontend.scala:393:28]
.io_imem_gpa_bits (_frontend_io_cpu_gpa_bits), // @[Frontend.scala:393:28]
.io_imem_gpa_is_pte (_frontend_io_cpu_gpa_is_pte), // @[Frontend.scala:393:28]
.io_imem_btb_update_valid (_core_io_imem_btb_update_valid),
.io_imem_btb_update_bits_prediction_cfiType (_core_io_imem_btb_update_bits_prediction_cfiType),
.io_imem_btb_update_bits_prediction_taken (_core_io_imem_btb_update_bits_prediction_taken),
.io_imem_btb_update_bits_prediction_mask (_core_io_imem_btb_update_bits_prediction_mask),
.io_imem_btb_update_bits_prediction_bridx (_core_io_imem_btb_update_bits_prediction_bridx),
.io_imem_btb_update_bits_prediction_target (_core_io_imem_btb_update_bits_prediction_target),
.io_imem_btb_update_bits_prediction_entry (_core_io_imem_btb_update_bits_prediction_entry),
.io_imem_btb_update_bits_prediction_bht_history (_core_io_imem_btb_update_bits_prediction_bht_history),
.io_imem_btb_update_bits_prediction_bht_value (_core_io_imem_btb_update_bits_prediction_bht_value),
.io_imem_btb_update_bits_pc (_core_io_imem_btb_update_bits_pc),
.io_imem_btb_update_bits_target (_core_io_imem_btb_update_bits_target),
.io_imem_btb_update_bits_isValid (_core_io_imem_btb_update_bits_isValid),
.io_imem_btb_update_bits_br_pc (_core_io_imem_btb_update_bits_br_pc),
.io_imem_btb_update_bits_cfiType (_core_io_imem_btb_update_bits_cfiType),
.io_imem_bht_update_valid (_core_io_imem_bht_update_valid),
.io_imem_bht_update_bits_prediction_history (_core_io_imem_bht_update_bits_prediction_history),
.io_imem_bht_update_bits_prediction_value (_core_io_imem_bht_update_bits_prediction_value),
.io_imem_bht_update_bits_pc (_core_io_imem_bht_update_bits_pc),
.io_imem_bht_update_bits_branch (_core_io_imem_bht_update_bits_branch),
.io_imem_bht_update_bits_taken (_core_io_imem_bht_update_bits_taken),
.io_imem_bht_update_bits_mispredict (_core_io_imem_bht_update_bits_mispredict),
.io_imem_flush_icache (_core_io_imem_flush_icache),
.io_imem_npc (_frontend_io_cpu_npc), // @[Frontend.scala:393:28]
.io_imem_perf_acquire (_frontend_io_cpu_perf_acquire), // @[Frontend.scala:393:28]
.io_imem_progress (_core_io_imem_progress),
.io_dmem_req_ready (_dcacheArb_io_requestor_0_req_ready), // @[HellaCache.scala:292:25]
.io_dmem_req_valid (_core_io_dmem_req_valid),
.io_dmem_req_bits_addr (_core_io_dmem_req_bits_addr),
.io_dmem_req_bits_tag (_core_io_dmem_req_bits_tag),
.io_dmem_req_bits_cmd (_core_io_dmem_req_bits_cmd),
.io_dmem_req_bits_size (_core_io_dmem_req_bits_size),
.io_dmem_req_bits_signed (_core_io_dmem_req_bits_signed),
.io_dmem_req_bits_dv (_core_io_dmem_req_bits_dv),
.io_dmem_req_bits_no_resp (_core_io_dmem_req_bits_no_resp),
.io_dmem_s1_kill (_core_io_dmem_s1_kill),
.io_dmem_s1_data_data (_core_io_dmem_s1_data_data),
.io_dmem_s2_nack (_dcacheArb_io_requestor_0_s2_nack), // @[HellaCache.scala:292:25]
.io_dmem_s2_nack_cause_raw (_dcacheArb_io_requestor_0_s2_nack_cause_raw), // @[HellaCache.scala:292:25]
.io_dmem_s2_uncached (_dcacheArb_io_requestor_0_s2_uncached), // @[HellaCache.scala:292:25]
.io_dmem_s2_paddr (_dcacheArb_io_requestor_0_s2_paddr), // @[HellaCache.scala:292:25]
.io_dmem_resp_valid (_dcacheArb_io_requestor_0_resp_valid), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_addr (_dcacheArb_io_requestor_0_resp_bits_addr), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_tag (_dcacheArb_io_requestor_0_resp_bits_tag), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_cmd (_dcacheArb_io_requestor_0_resp_bits_cmd), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_size (_dcacheArb_io_requestor_0_resp_bits_size), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_signed (_dcacheArb_io_requestor_0_resp_bits_signed), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_dprv (_dcacheArb_io_requestor_0_resp_bits_dprv), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_dv (_dcacheArb_io_requestor_0_resp_bits_dv), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_data (_dcacheArb_io_requestor_0_resp_bits_data), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_mask (_dcacheArb_io_requestor_0_resp_bits_mask), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_replay (_dcacheArb_io_requestor_0_resp_bits_replay), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_has_data (_dcacheArb_io_requestor_0_resp_bits_has_data), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_data_word_bypass (_dcacheArb_io_requestor_0_resp_bits_data_word_bypass), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_data_raw (_dcacheArb_io_requestor_0_resp_bits_data_raw), // @[HellaCache.scala:292:25]
.io_dmem_resp_bits_store_data (_dcacheArb_io_requestor_0_resp_bits_store_data), // @[HellaCache.scala:292:25]
.io_dmem_replay_next (_dcacheArb_io_requestor_0_replay_next), // @[HellaCache.scala:292:25]
.io_dmem_s2_xcpt_ma_ld (_dcacheArb_io_requestor_0_s2_xcpt_ma_ld), // @[HellaCache.scala:292:25]
.io_dmem_s2_xcpt_ma_st (_dcacheArb_io_requestor_0_s2_xcpt_ma_st), // @[HellaCache.scala:292:25]
.io_dmem_s2_xcpt_pf_ld (_dcacheArb_io_requestor_0_s2_xcpt_pf_ld), // @[HellaCache.scala:292:25]
.io_dmem_s2_xcpt_pf_st (_dcacheArb_io_requestor_0_s2_xcpt_pf_st), // @[HellaCache.scala:292:25]
.io_dmem_s2_xcpt_ae_ld (_dcacheArb_io_requestor_0_s2_xcpt_ae_ld), // @[HellaCache.scala:292:25]
.io_dmem_s2_xcpt_ae_st (_dcacheArb_io_requestor_0_s2_xcpt_ae_st), // @[HellaCache.scala:292:25]
.io_dmem_s2_gpa (_dcacheArb_io_requestor_0_s2_gpa), // @[HellaCache.scala:292:25]
.io_dmem_ordered (_dcacheArb_io_requestor_0_ordered), // @[HellaCache.scala:292:25]
.io_dmem_store_pending (_dcacheArb_io_requestor_0_store_pending), // @[HellaCache.scala:292:25]
.io_dmem_perf_acquire (_dcacheArb_io_requestor_0_perf_acquire), // @[HellaCache.scala:292:25]
.io_dmem_perf_grant (_dcacheArb_io_requestor_0_perf_grant), // @[HellaCache.scala:292:25]
.io_dmem_perf_blocked (_dcacheArb_io_requestor_0_perf_blocked), // @[HellaCache.scala:292:25]
.io_dmem_perf_canAcceptStoreThenLoad (_dcacheArb_io_requestor_0_perf_canAcceptStoreThenLoad), // @[HellaCache.scala:292:25]
.io_dmem_perf_canAcceptStoreThenRMW (_dcacheArb_io_requestor_0_perf_canAcceptStoreThenRMW), // @[HellaCache.scala:292:25]
.io_dmem_perf_canAcceptLoadThenLoad (_dcacheArb_io_requestor_0_perf_canAcceptLoadThenLoad), // @[HellaCache.scala:292:25]
.io_dmem_perf_storeBufferEmptyAfterLoad (_dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterLoad), // @[HellaCache.scala:292:25]
.io_dmem_perf_storeBufferEmptyAfterStore (_dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterStore), // @[HellaCache.scala:292:25]
.io_dmem_keep_clock_enabled (_core_io_dmem_keep_clock_enabled),
.io_ptw_sfence_valid (_core_io_ptw_sfence_valid),
.io_ptw_sfence_bits_rs1 (_core_io_ptw_sfence_bits_rs1),
.io_ptw_sfence_bits_rs2 (_core_io_ptw_sfence_bits_rs2),
.io_ptw_sfence_bits_addr (_core_io_ptw_sfence_bits_addr),
.io_ptw_sfence_bits_asid (_core_io_ptw_sfence_bits_asid),
.io_ptw_sfence_bits_hv (_core_io_ptw_sfence_bits_hv),
.io_ptw_sfence_bits_hg (_core_io_ptw_sfence_bits_hg),
.io_ptw_status_debug (_core_io_ptw_status_debug),
.io_ptw_status_cease (_core_io_ptw_status_cease),
.io_ptw_status_wfi (_core_io_ptw_status_wfi),
.io_ptw_status_isa (_core_io_ptw_status_isa),
.io_ptw_status_dv (_core_io_ptw_status_dv),
.io_ptw_status_v (_core_io_ptw_status_v),
.io_ptw_status_sd (_core_io_ptw_status_sd),
.io_ptw_status_mpv (_core_io_ptw_status_mpv),
.io_ptw_status_gva (_core_io_ptw_status_gva),
.io_ptw_status_fs (_core_io_ptw_status_fs),
.io_ptw_status_mpp (_core_io_ptw_status_mpp),
.io_ptw_status_mpie (_core_io_ptw_status_mpie),
.io_ptw_status_mie (_core_io_ptw_status_mie),
.io_ptw_gstatus_debug (_core_io_ptw_gstatus_debug),
.io_ptw_gstatus_cease (_core_io_ptw_gstatus_cease),
.io_ptw_gstatus_wfi (_core_io_ptw_gstatus_wfi),
.io_ptw_gstatus_isa (_core_io_ptw_gstatus_isa),
.io_ptw_gstatus_dprv (_core_io_ptw_gstatus_dprv),
.io_ptw_gstatus_dv (_core_io_ptw_gstatus_dv),
.io_ptw_gstatus_prv (_core_io_ptw_gstatus_prv),
.io_ptw_gstatus_v (_core_io_ptw_gstatus_v),
.io_ptw_gstatus_sd (_core_io_ptw_gstatus_sd),
.io_ptw_gstatus_zero2 (_core_io_ptw_gstatus_zero2),
.io_ptw_gstatus_mpv (_core_io_ptw_gstatus_mpv),
.io_ptw_gstatus_gva (_core_io_ptw_gstatus_gva),
.io_ptw_gstatus_mbe (_core_io_ptw_gstatus_mbe),
.io_ptw_gstatus_sbe (_core_io_ptw_gstatus_sbe),
.io_ptw_gstatus_sxl (_core_io_ptw_gstatus_sxl),
.io_ptw_gstatus_zero1 (_core_io_ptw_gstatus_zero1),
.io_ptw_gstatus_tsr (_core_io_ptw_gstatus_tsr),
.io_ptw_gstatus_tw (_core_io_ptw_gstatus_tw),
.io_ptw_gstatus_tvm (_core_io_ptw_gstatus_tvm),
.io_ptw_gstatus_mxr (_core_io_ptw_gstatus_mxr),
.io_ptw_gstatus_sum (_core_io_ptw_gstatus_sum),
.io_ptw_gstatus_mprv (_core_io_ptw_gstatus_mprv),
.io_ptw_gstatus_fs (_core_io_ptw_gstatus_fs),
.io_ptw_gstatus_mpp (_core_io_ptw_gstatus_mpp),
.io_ptw_gstatus_vs (_core_io_ptw_gstatus_vs),
.io_ptw_gstatus_spp (_core_io_ptw_gstatus_spp),
.io_ptw_gstatus_mpie (_core_io_ptw_gstatus_mpie),
.io_ptw_gstatus_ube (_core_io_ptw_gstatus_ube),
.io_ptw_gstatus_spie (_core_io_ptw_gstatus_spie),
.io_ptw_gstatus_upie (_core_io_ptw_gstatus_upie),
.io_ptw_gstatus_mie (_core_io_ptw_gstatus_mie),
.io_ptw_gstatus_hie (_core_io_ptw_gstatus_hie),
.io_ptw_gstatus_sie (_core_io_ptw_gstatus_sie),
.io_ptw_gstatus_uie (_core_io_ptw_gstatus_uie),
.io_ptw_pmp_0_cfg_l (_core_io_ptw_pmp_0_cfg_l),
.io_ptw_pmp_0_cfg_a (_core_io_ptw_pmp_0_cfg_a),
.io_ptw_pmp_0_cfg_x (_core_io_ptw_pmp_0_cfg_x),
.io_ptw_pmp_0_cfg_w (_core_io_ptw_pmp_0_cfg_w),
.io_ptw_pmp_0_cfg_r (_core_io_ptw_pmp_0_cfg_r),
.io_ptw_pmp_0_addr (_core_io_ptw_pmp_0_addr),
.io_ptw_pmp_0_mask (_core_io_ptw_pmp_0_mask),
.io_ptw_pmp_1_cfg_l (_core_io_ptw_pmp_1_cfg_l),
.io_ptw_pmp_1_cfg_a (_core_io_ptw_pmp_1_cfg_a),
.io_ptw_pmp_1_cfg_x (_core_io_ptw_pmp_1_cfg_x),
.io_ptw_pmp_1_cfg_w (_core_io_ptw_pmp_1_cfg_w),
.io_ptw_pmp_1_cfg_r (_core_io_ptw_pmp_1_cfg_r),
.io_ptw_pmp_1_addr (_core_io_ptw_pmp_1_addr),
.io_ptw_pmp_1_mask (_core_io_ptw_pmp_1_mask),
.io_ptw_pmp_2_cfg_l (_core_io_ptw_pmp_2_cfg_l),
.io_ptw_pmp_2_cfg_a (_core_io_ptw_pmp_2_cfg_a),
.io_ptw_pmp_2_cfg_x (_core_io_ptw_pmp_2_cfg_x),
.io_ptw_pmp_2_cfg_w (_core_io_ptw_pmp_2_cfg_w),
.io_ptw_pmp_2_cfg_r (_core_io_ptw_pmp_2_cfg_r),
.io_ptw_pmp_2_addr (_core_io_ptw_pmp_2_addr),
.io_ptw_pmp_2_mask (_core_io_ptw_pmp_2_mask),
.io_ptw_pmp_3_cfg_l (_core_io_ptw_pmp_3_cfg_l),
.io_ptw_pmp_3_cfg_a (_core_io_ptw_pmp_3_cfg_a),
.io_ptw_pmp_3_cfg_x (_core_io_ptw_pmp_3_cfg_x),
.io_ptw_pmp_3_cfg_w (_core_io_ptw_pmp_3_cfg_w),
.io_ptw_pmp_3_cfg_r (_core_io_ptw_pmp_3_cfg_r),
.io_ptw_pmp_3_addr (_core_io_ptw_pmp_3_addr),
.io_ptw_pmp_3_mask (_core_io_ptw_pmp_3_mask),
.io_ptw_pmp_4_cfg_l (_core_io_ptw_pmp_4_cfg_l),
.io_ptw_pmp_4_cfg_a (_core_io_ptw_pmp_4_cfg_a),
.io_ptw_pmp_4_cfg_x (_core_io_ptw_pmp_4_cfg_x),
.io_ptw_pmp_4_cfg_w (_core_io_ptw_pmp_4_cfg_w),
.io_ptw_pmp_4_cfg_r (_core_io_ptw_pmp_4_cfg_r),
.io_ptw_pmp_4_addr (_core_io_ptw_pmp_4_addr),
.io_ptw_pmp_4_mask (_core_io_ptw_pmp_4_mask),
.io_ptw_pmp_5_cfg_l (_core_io_ptw_pmp_5_cfg_l),
.io_ptw_pmp_5_cfg_a (_core_io_ptw_pmp_5_cfg_a),
.io_ptw_pmp_5_cfg_x (_core_io_ptw_pmp_5_cfg_x),
.io_ptw_pmp_5_cfg_w (_core_io_ptw_pmp_5_cfg_w),
.io_ptw_pmp_5_cfg_r (_core_io_ptw_pmp_5_cfg_r),
.io_ptw_pmp_5_addr (_core_io_ptw_pmp_5_addr),
.io_ptw_pmp_5_mask (_core_io_ptw_pmp_5_mask),
.io_ptw_pmp_6_cfg_l (_core_io_ptw_pmp_6_cfg_l),
.io_ptw_pmp_6_cfg_a (_core_io_ptw_pmp_6_cfg_a),
.io_ptw_pmp_6_cfg_x (_core_io_ptw_pmp_6_cfg_x),
.io_ptw_pmp_6_cfg_w (_core_io_ptw_pmp_6_cfg_w),
.io_ptw_pmp_6_cfg_r (_core_io_ptw_pmp_6_cfg_r),
.io_ptw_pmp_6_addr (_core_io_ptw_pmp_6_addr),
.io_ptw_pmp_6_mask (_core_io_ptw_pmp_6_mask),
.io_ptw_pmp_7_cfg_l (_core_io_ptw_pmp_7_cfg_l),
.io_ptw_pmp_7_cfg_a (_core_io_ptw_pmp_7_cfg_a),
.io_ptw_pmp_7_cfg_x (_core_io_ptw_pmp_7_cfg_x),
.io_ptw_pmp_7_cfg_w (_core_io_ptw_pmp_7_cfg_w),
.io_ptw_pmp_7_cfg_r (_core_io_ptw_pmp_7_cfg_r),
.io_ptw_pmp_7_addr (_core_io_ptw_pmp_7_addr),
.io_ptw_pmp_7_mask (_core_io_ptw_pmp_7_mask),
.io_ptw_perf_pte_miss (_ptw_io_dpath_perf_pte_miss), // @[PTW.scala:802:19]
.io_ptw_perf_pte_hit (_ptw_io_dpath_perf_pte_hit), // @[PTW.scala:802:19]
.io_ptw_customCSRs_csrs_0_ren (_core_io_ptw_customCSRs_csrs_0_ren),
.io_ptw_customCSRs_csrs_0_wen (_core_io_ptw_customCSRs_csrs_0_wen),
.io_ptw_customCSRs_csrs_0_wdata (_core_io_ptw_customCSRs_csrs_0_wdata),
.io_ptw_customCSRs_csrs_0_value (_core_io_ptw_customCSRs_csrs_0_value),
.io_ptw_customCSRs_csrs_1_ren (_core_io_ptw_customCSRs_csrs_1_ren),
.io_ptw_customCSRs_csrs_1_wen (_core_io_ptw_customCSRs_csrs_1_wen),
.io_ptw_customCSRs_csrs_1_wdata (_core_io_ptw_customCSRs_csrs_1_wdata),
.io_ptw_customCSRs_csrs_1_value (_core_io_ptw_customCSRs_csrs_1_value),
.io_ptw_customCSRs_csrs_2_ren (_core_io_ptw_customCSRs_csrs_2_ren),
.io_ptw_customCSRs_csrs_2_wen (_core_io_ptw_customCSRs_csrs_2_wen),
.io_ptw_customCSRs_csrs_2_wdata (_core_io_ptw_customCSRs_csrs_2_wdata),
.io_ptw_customCSRs_csrs_2_value (_core_io_ptw_customCSRs_csrs_2_value),
.io_ptw_customCSRs_csrs_3_ren (_core_io_ptw_customCSRs_csrs_3_ren),
.io_ptw_customCSRs_csrs_3_wen (_core_io_ptw_customCSRs_csrs_3_wen),
.io_ptw_customCSRs_csrs_3_wdata (_core_io_ptw_customCSRs_csrs_3_wdata),
.io_ptw_customCSRs_csrs_3_value (_core_io_ptw_customCSRs_csrs_3_value),
.io_fpu_hartid (_core_io_fpu_hartid),
.io_fpu_time (_core_io_fpu_time),
.io_fpu_inst (_core_io_fpu_inst),
.io_fpu_fromint_data (_core_io_fpu_fromint_data),
.io_fpu_fcsr_rm (_core_io_fpu_fcsr_rm),
.io_fpu_fcsr_flags_valid (_fpuOpt_io_fcsr_flags_valid), // @[RocketTile.scala:242:62]
.io_fpu_fcsr_flags_bits (_fpuOpt_io_fcsr_flags_bits), // @[RocketTile.scala:242:62]
.io_fpu_store_data (_fpuOpt_io_store_data), // @[RocketTile.scala:242:62]
.io_fpu_toint_data (_fpuOpt_io_toint_data), // @[RocketTile.scala:242:62]
.io_fpu_ll_resp_val (_core_io_fpu_ll_resp_val),
.io_fpu_ll_resp_type (_core_io_fpu_ll_resp_type),
.io_fpu_ll_resp_tag (_core_io_fpu_ll_resp_tag),
.io_fpu_ll_resp_data (_core_io_fpu_ll_resp_data),
.io_fpu_valid (_core_io_fpu_valid),
.io_fpu_fcsr_rdy (_fpuOpt_io_fcsr_rdy), // @[RocketTile.scala:242:62]
.io_fpu_nack_mem (_fpuOpt_io_nack_mem), // @[RocketTile.scala:242:62]
.io_fpu_illegal_rm (_fpuOpt_io_illegal_rm), // @[RocketTile.scala:242:62]
.io_fpu_killx (_core_io_fpu_killx),
.io_fpu_killm (_core_io_fpu_killm),
.io_fpu_dec_ldst (_fpuOpt_io_dec_ldst), // @[RocketTile.scala:242:62]
.io_fpu_dec_wen (_fpuOpt_io_dec_wen), // @[RocketTile.scala:242:62]
.io_fpu_dec_ren1 (_fpuOpt_io_dec_ren1), // @[RocketTile.scala:242:62]
.io_fpu_dec_ren2 (_fpuOpt_io_dec_ren2), // @[RocketTile.scala:242:62]
.io_fpu_dec_ren3 (_fpuOpt_io_dec_ren3), // @[RocketTile.scala:242:62]
.io_fpu_dec_swap12 (_fpuOpt_io_dec_swap12), // @[RocketTile.scala:242:62]
.io_fpu_dec_swap23 (_fpuOpt_io_dec_swap23), // @[RocketTile.scala:242:62]
.io_fpu_dec_typeTagIn (_fpuOpt_io_dec_typeTagIn), // @[RocketTile.scala:242:62]
.io_fpu_dec_typeTagOut (_fpuOpt_io_dec_typeTagOut), // @[RocketTile.scala:242:62]
.io_fpu_dec_fromint (_fpuOpt_io_dec_fromint), // @[RocketTile.scala:242:62]
.io_fpu_dec_toint (_fpuOpt_io_dec_toint), // @[RocketTile.scala:242:62]
.io_fpu_dec_fastpipe (_fpuOpt_io_dec_fastpipe), // @[RocketTile.scala:242:62]
.io_fpu_dec_fma (_fpuOpt_io_dec_fma), // @[RocketTile.scala:242:62]
.io_fpu_dec_div (_fpuOpt_io_dec_div), // @[RocketTile.scala:242:62]
.io_fpu_dec_sqrt (_fpuOpt_io_dec_sqrt), // @[RocketTile.scala:242:62]
.io_fpu_dec_wflags (_fpuOpt_io_dec_wflags), // @[RocketTile.scala:242:62]
.io_fpu_dec_vec (_fpuOpt_io_dec_vec), // @[RocketTile.scala:242:62]
.io_fpu_sboard_set (_fpuOpt_io_sboard_set), // @[RocketTile.scala:242:62]
.io_fpu_sboard_clr (_fpuOpt_io_sboard_clr), // @[RocketTile.scala:242:62]
.io_fpu_sboard_clra (_fpuOpt_io_sboard_clra), // @[RocketTile.scala:242:62]
.io_fpu_keep_clock_enabled (_core_io_fpu_keep_clock_enabled),
.io_trace_insns_0_valid (traceSourceNodeOut_insns_0_valid),
.io_trace_insns_0_iaddr (traceSourceNodeOut_insns_0_iaddr),
.io_trace_insns_0_insn (traceSourceNodeOut_insns_0_insn),
.io_trace_insns_0_priv (traceSourceNodeOut_insns_0_priv),
.io_trace_insns_0_exception (traceSourceNodeOut_insns_0_exception),
.io_trace_insns_0_interrupt (traceSourceNodeOut_insns_0_interrupt),
.io_trace_insns_0_cause (traceSourceNodeOut_insns_0_cause),
.io_trace_insns_0_tval (traceSourceNodeOut_insns_0_tval),
.io_trace_time (traceSourceNodeOut_time),
.io_bpwatch_0_valid_0 (bpwatchSourceNodeOut_0_valid_0),
.io_bpwatch_0_action (bpwatchSourceNodeOut_0_action),
.io_wfi (_core_io_wfi)
); // @[RocketTile.scala:147:20]
assign auto_buffer_in_a_ready = auto_buffer_in_a_ready_0; // @[RocketTile.scala:141:7]
assign auto_buffer_in_d_valid = auto_buffer_in_d_valid_0; // @[RocketTile.scala:141:7]
assign auto_buffer_in_d_bits_opcode = auto_buffer_in_d_bits_opcode_0; // @[RocketTile.scala:141:7]
assign auto_buffer_in_d_bits_size = auto_buffer_in_d_bits_size_0; // @[RocketTile.scala:141:7]
assign auto_buffer_in_d_bits_source = auto_buffer_in_d_bits_source_0; // @[RocketTile.scala:141:7]
assign auto_buffer_in_d_bits_data = auto_buffer_in_d_bits_data_0; // @[RocketTile.scala:141:7]
assign auto_buffer_out_a_valid = auto_buffer_out_a_valid_0; // @[RocketTile.scala:141:7]
assign auto_buffer_out_a_bits_opcode = auto_buffer_out_a_bits_opcode_0; // @[RocketTile.scala:141:7]
assign auto_buffer_out_a_bits_param = auto_buffer_out_a_bits_param_0; // @[RocketTile.scala:141:7]
assign auto_buffer_out_a_bits_size = auto_buffer_out_a_bits_size_0; // @[RocketTile.scala:141:7]
assign auto_buffer_out_a_bits_source = auto_buffer_out_a_bits_source_0; // @[RocketTile.scala:141:7]
assign auto_buffer_out_a_bits_address = auto_buffer_out_a_bits_address_0; // @[RocketTile.scala:141:7]
assign auto_buffer_out_a_bits_user_amba_prot_bufferable = auto_buffer_out_a_bits_user_amba_prot_bufferable_0; // @[RocketTile.scala:141:7]
assign auto_buffer_out_a_bits_user_amba_prot_modifiable = auto_buffer_out_a_bits_user_amba_prot_modifiable_0; // @[RocketTile.scala:141:7]
assign auto_buffer_out_a_bits_user_amba_prot_readalloc = auto_buffer_out_a_bits_user_amba_prot_readalloc_0; // @[RocketTile.scala:141:7]
assign auto_buffer_out_a_bits_user_amba_prot_writealloc = auto_buffer_out_a_bits_user_amba_prot_writealloc_0; // @[RocketTile.scala:141:7]
assign auto_buffer_out_a_bits_user_amba_prot_privileged = auto_buffer_out_a_bits_user_amba_prot_privileged_0; // @[RocketTile.scala:141:7]
assign auto_buffer_out_a_bits_user_amba_prot_secure = auto_buffer_out_a_bits_user_amba_prot_secure_0; // @[RocketTile.scala:141:7]
assign auto_buffer_out_a_bits_user_amba_prot_fetch = auto_buffer_out_a_bits_user_amba_prot_fetch_0; // @[RocketTile.scala:141:7]
assign auto_buffer_out_a_bits_mask = auto_buffer_out_a_bits_mask_0; // @[RocketTile.scala:141:7]
assign auto_buffer_out_a_bits_data = auto_buffer_out_a_bits_data_0; // @[RocketTile.scala:141:7]
assign auto_buffer_out_d_ready = auto_buffer_out_d_ready_0; // @[RocketTile.scala:141:7]
assign auto_wfi_out_0 = auto_wfi_out_0_0; // @[RocketTile.scala:141:7]
assign auto_trace_source_out_insns_0_valid = auto_trace_source_out_insns_0_valid_0; // @[RocketTile.scala:141:7]
assign auto_trace_source_out_insns_0_iaddr = auto_trace_source_out_insns_0_iaddr_0; // @[RocketTile.scala:141:7]
assign auto_trace_source_out_insns_0_insn = auto_trace_source_out_insns_0_insn_0; // @[RocketTile.scala:141:7]
assign auto_trace_source_out_insns_0_priv = auto_trace_source_out_insns_0_priv_0; // @[RocketTile.scala:141:7]
assign auto_trace_source_out_insns_0_exception = auto_trace_source_out_insns_0_exception_0; // @[RocketTile.scala:141:7]
assign auto_trace_source_out_insns_0_interrupt = auto_trace_source_out_insns_0_interrupt_0; // @[RocketTile.scala:141:7]
assign auto_trace_source_out_insns_0_cause = auto_trace_source_out_insns_0_cause_0; // @[RocketTile.scala:141:7]
assign auto_trace_source_out_insns_0_tval = auto_trace_source_out_insns_0_tval_0; // @[RocketTile.scala:141:7]
assign auto_trace_source_out_time = auto_trace_source_out_time_0; // @[RocketTile.scala:141:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File MulRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (ported from Verilog to
Chisel by Andrew Waterman).
Copyright 2019, 2020 The Regents of the University of California. All rights
reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulFullRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
val io = IO(new Bundle {
val a = Input(new RawFloat(expWidth, sigWidth))
val b = Input(new RawFloat(expWidth, sigWidth))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth*2 - 1))
})
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
val notSigNaN_invalidExc = (io.a.isInf && io.b.isZero) || (io.a.isZero && io.b.isInf)
val notNaN_isInfOut = io.a.isInf || io.b.isInf
val notNaN_isZeroOut = io.a.isZero || io.b.isZero
val notNaN_signOut = io.a.sign ^ io.b.sign
val common_sExpOut = io.a.sExp + io.b.sExp - (1<<expWidth).S
val common_sigOut = (io.a.sig * io.b.sig)(sigWidth*2 - 1, 0)
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
io.invalidExc := isSigNaNRawFloat(io.a) || isSigNaNRawFloat(io.b) || notSigNaN_invalidExc
io.rawOut.isInf := notNaN_isInfOut
io.rawOut.isZero := notNaN_isZeroOut
io.rawOut.sExp := common_sExpOut
io.rawOut.isNaN := io.a.isNaN || io.b.isNaN
io.rawOut.sign := notNaN_signOut
io.rawOut.sig := common_sigOut
}
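//----------------------------------------------------------------------------
// Illustrative model (not part of the original HardFloat file): a hypothetical
// plain-Scala sketch of the arithmetic in MulFullRawFN above, handy for
// reasoning about widths. `modelSExpOut` mirrors the `- (1<<expWidth).S` bias
// term in `common_sExpOut`, and `modelSigOut` mirrors truncating the
// significand product to its low sigWidth*2 bits.
//----------------------------------------------------------------------------
object MulFullRawModel
{
    def modelSExpOut(sExpA: BigInt, sExpB: BigInt, expWidth: Int): BigInt =
        sExpA + sExpB - (BigInt(1) << expWidth)
    def modelSigOut(sigA: BigInt, sigB: BigInt, sigWidth: Int): BigInt =
        (sigA * sigB) & ((BigInt(1) << (sigWidth * 2)) - 1)
}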
class MulRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
val io = IO(new Bundle {
val a = Input(new RawFloat(expWidth, sigWidth))
val b = Input(new RawFloat(expWidth, sigWidth))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
})
val mulFullRaw = Module(new MulFullRawFN(expWidth, sigWidth))
mulFullRaw.io.a := io.a
mulFullRaw.io.b := io.b
io.invalidExc := mulFullRaw.io.invalidExc
io.rawOut := mulFullRaw.io.rawOut
io.rawOut.sig := {
val sig = mulFullRaw.io.rawOut.sig
Cat(sig >> (sigWidth - 2), sig(sigWidth - 3, 0).orR)
}
}
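//----------------------------------------------------------------------------
// Illustrative model (not part of the original HardFloat file): MulRawFN keeps
// the upper bits of the full sigWidth*2-bit product and OR-reduces the
// discarded low bits into one sticky bit, as in
// `Cat(sig >> (sigWidth - 2), sig(sigWidth - 3, 0).orR)` above. A hypothetical
// plain-Scala equivalent of that narrowing step:
//----------------------------------------------------------------------------
object MulRawSigNarrowingModel
{
    def narrowSig(fullSig: BigInt, sigWidth: Int): BigInt = {
        val lowMask = (BigInt(1) << (sigWidth - 2)) - 1      // bits (sigWidth-3, 0)
        val sticky = if ((fullSig & lowMask) != 0) BigInt(1) else BigInt(0)
        ((fullSig >> (sigWidth - 2)) << 1) | sticky          // Cat(high bits, sticky)
    }
}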
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulRecFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
val io = IO(new Bundle {
val a = Input(UInt((expWidth + sigWidth + 1).W))
val b = Input(UInt((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(Bool())
val out = Output(UInt((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(UInt(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulRawFN = Module(new MulRawFN(expWidth, sigWidth))
mulRawFN.io.a := rawFloatFromRecFN(expWidth, sigWidth, io.a)
mulRawFN.io.b := rawFloatFromRecFN(expWidth, sigWidth, io.b)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN =
Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
roundRawFNToRecFN.io.invalidExc := mulRawFN.io.invalidExc
roundRawFNToRecFN.io.infiniteExc := false.B
roundRawFNToRecFN.io.in := mulRawFN.io.rawOut
roundRawFNToRecFN.io.roundingMode := io.roundingMode
roundRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
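//----------------------------------------------------------------------------
// Elaboration sketch (not part of the original HardFloat file): assuming a
// recent Chisel/CIRCT toolchain that provides circt.stage.ChiselStage, a
// hypothetical driver like this would emit SystemVerilog containing modules
// such as the MulFullRawFN instance shown below, here for a single-precision
// (expWidth = 8, sigWidth = 24) multiplier.
//----------------------------------------------------------------------------
object EmitMulRecFN extends App
{
    import circt.stage.ChiselStage
    ChiselStage.emitSystemVerilogFile(new MulRecFN(8, 24))
}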
| module MulFullRawFN_26( // @[MulRecFN.scala:47:7]
input io_a_isNaN, // @[MulRecFN.scala:49:16]
input io_a_isInf, // @[MulRecFN.scala:49:16]
input io_a_isZero, // @[MulRecFN.scala:49:16]
input io_a_sign, // @[MulRecFN.scala:49:16]
input [9:0] io_a_sExp, // @[MulRecFN.scala:49:16]
input [24:0] io_a_sig, // @[MulRecFN.scala:49:16]
input io_b_isNaN, // @[MulRecFN.scala:49:16]
input io_b_isInf, // @[MulRecFN.scala:49:16]
input io_b_isZero, // @[MulRecFN.scala:49:16]
input io_b_sign, // @[MulRecFN.scala:49:16]
input [9:0] io_b_sExp, // @[MulRecFN.scala:49:16]
input [24:0] io_b_sig, // @[MulRecFN.scala:49:16]
output io_invalidExc, // @[MulRecFN.scala:49:16]
output io_rawOut_isNaN, // @[MulRecFN.scala:49:16]
output io_rawOut_isInf, // @[MulRecFN.scala:49:16]
output io_rawOut_isZero, // @[MulRecFN.scala:49:16]
output io_rawOut_sign, // @[MulRecFN.scala:49:16]
output [9:0] io_rawOut_sExp, // @[MulRecFN.scala:49:16]
output [47:0] io_rawOut_sig // @[MulRecFN.scala:49:16]
);
wire io_a_isNaN_0 = io_a_isNaN; // @[MulRecFN.scala:47:7]
wire io_a_isInf_0 = io_a_isInf; // @[MulRecFN.scala:47:7]
wire io_a_isZero_0 = io_a_isZero; // @[MulRecFN.scala:47:7]
wire io_a_sign_0 = io_a_sign; // @[MulRecFN.scala:47:7]
wire [9:0] io_a_sExp_0 = io_a_sExp; // @[MulRecFN.scala:47:7]
wire [24:0] io_a_sig_0 = io_a_sig; // @[MulRecFN.scala:47:7]
wire io_b_isNaN_0 = io_b_isNaN; // @[MulRecFN.scala:47:7]
wire io_b_isInf_0 = io_b_isInf; // @[MulRecFN.scala:47:7]
wire io_b_isZero_0 = io_b_isZero; // @[MulRecFN.scala:47:7]
wire io_b_sign_0 = io_b_sign; // @[MulRecFN.scala:47:7]
wire [9:0] io_b_sExp_0 = io_b_sExp; // @[MulRecFN.scala:47:7]
wire [24:0] io_b_sig_0 = io_b_sig; // @[MulRecFN.scala:47:7]
wire _io_invalidExc_T_7; // @[MulRecFN.scala:66:71]
wire _io_rawOut_isNaN_T; // @[MulRecFN.scala:70:35]
wire notNaN_isInfOut; // @[MulRecFN.scala:59:38]
wire notNaN_isZeroOut; // @[MulRecFN.scala:60:40]
wire notNaN_signOut; // @[MulRecFN.scala:61:36]
wire [9:0] common_sExpOut; // @[MulRecFN.scala:62:48]
wire [47:0] common_sigOut; // @[MulRecFN.scala:63:46]
wire io_rawOut_isNaN_0; // @[MulRecFN.scala:47:7]
wire io_rawOut_isInf_0; // @[MulRecFN.scala:47:7]
wire io_rawOut_isZero_0; // @[MulRecFN.scala:47:7]
wire io_rawOut_sign_0; // @[MulRecFN.scala:47:7]
wire [9:0] io_rawOut_sExp_0; // @[MulRecFN.scala:47:7]
wire [47:0] io_rawOut_sig_0; // @[MulRecFN.scala:47:7]
wire io_invalidExc_0; // @[MulRecFN.scala:47:7]
wire _notSigNaN_invalidExc_T = io_a_isInf_0 & io_b_isZero_0; // @[MulRecFN.scala:47:7, :58:44]
wire _notSigNaN_invalidExc_T_1 = io_a_isZero_0 & io_b_isInf_0; // @[MulRecFN.scala:47:7, :58:76]
wire notSigNaN_invalidExc = _notSigNaN_invalidExc_T | _notSigNaN_invalidExc_T_1; // @[MulRecFN.scala:58:{44,60,76}]
assign notNaN_isInfOut = io_a_isInf_0 | io_b_isInf_0; // @[MulRecFN.scala:47:7, :59:38]
assign io_rawOut_isInf_0 = notNaN_isInfOut; // @[MulRecFN.scala:47:7, :59:38]
assign notNaN_isZeroOut = io_a_isZero_0 | io_b_isZero_0; // @[MulRecFN.scala:47:7, :60:40]
assign io_rawOut_isZero_0 = notNaN_isZeroOut; // @[MulRecFN.scala:47:7, :60:40]
assign notNaN_signOut = io_a_sign_0 ^ io_b_sign_0; // @[MulRecFN.scala:47:7, :61:36]
assign io_rawOut_sign_0 = notNaN_signOut; // @[MulRecFN.scala:47:7, :61:36]
wire [10:0] _common_sExpOut_T = {io_a_sExp_0[9], io_a_sExp_0} + {io_b_sExp_0[9], io_b_sExp_0}; // @[MulRecFN.scala:47:7, :62:36]
wire [9:0] _common_sExpOut_T_1 = _common_sExpOut_T[9:0]; // @[MulRecFN.scala:62:36]
wire [9:0] _common_sExpOut_T_2 = _common_sExpOut_T_1; // @[MulRecFN.scala:62:36]
wire [10:0] _common_sExpOut_T_3 = {_common_sExpOut_T_2[9], _common_sExpOut_T_2} - 11'h100; // @[MulRecFN.scala:62:{36,48}]
wire [9:0] _common_sExpOut_T_4 = _common_sExpOut_T_3[9:0]; // @[MulRecFN.scala:62:48]
assign common_sExpOut = _common_sExpOut_T_4; // @[MulRecFN.scala:62:48]
assign io_rawOut_sExp_0 = common_sExpOut; // @[MulRecFN.scala:47:7, :62:48]
wire [49:0] _common_sigOut_T = {25'h0, io_a_sig_0} * {25'h0, io_b_sig_0}; // @[MulRecFN.scala:47:7, :63:35]
assign common_sigOut = _common_sigOut_T[47:0]; // @[MulRecFN.scala:63:{35,46}]
assign io_rawOut_sig_0 = common_sigOut; // @[MulRecFN.scala:47:7, :63:46]
wire _io_invalidExc_T = io_a_sig_0[22]; // @[common.scala:82:56]
wire _io_invalidExc_T_1 = ~_io_invalidExc_T; // @[common.scala:82:{49,56}]
wire _io_invalidExc_T_2 = io_a_isNaN_0 & _io_invalidExc_T_1; // @[common.scala:82:{46,49}]
wire _io_invalidExc_T_3 = io_b_sig_0[22]; // @[common.scala:82:56]
wire _io_invalidExc_T_4 = ~_io_invalidExc_T_3; // @[common.scala:82:{49,56}]
wire _io_invalidExc_T_5 = io_b_isNaN_0 & _io_invalidExc_T_4; // @[common.scala:82:{46,49}]
wire _io_invalidExc_T_6 = _io_invalidExc_T_2 | _io_invalidExc_T_5; // @[common.scala:82:46]
assign _io_invalidExc_T_7 = _io_invalidExc_T_6 | notSigNaN_invalidExc; // @[MulRecFN.scala:58:60, :66:{45,71}]
assign io_invalidExc_0 = _io_invalidExc_T_7; // @[MulRecFN.scala:47:7, :66:71]
assign _io_rawOut_isNaN_T = io_a_isNaN_0 | io_b_isNaN_0; // @[MulRecFN.scala:47:7, :70:35]
assign io_rawOut_isNaN_0 = _io_rawOut_isNaN_T; // @[MulRecFN.scala:47:7, :70:35]
assign io_invalidExc = io_invalidExc_0; // @[MulRecFN.scala:47:7]
assign io_rawOut_isNaN = io_rawOut_isNaN_0; // @[MulRecFN.scala:47:7]
assign io_rawOut_isInf = io_rawOut_isInf_0; // @[MulRecFN.scala:47:7]
assign io_rawOut_isZero = io_rawOut_isZero_0; // @[MulRecFN.scala:47:7]
assign io_rawOut_sign = io_rawOut_sign_0; // @[MulRecFN.scala:47:7]
assign io_rawOut_sExp = io_rawOut_sExp_0; // @[MulRecFN.scala:47:7]
assign io_rawOut_sig = io_rawOut_sig_0; // @[MulRecFN.scala:47:7]
endmodule |
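As an aside on the significand handling above: MulRawFN keeps only the top sigWidth + 2 bits of the full 2*sigWidth-bit product and OR-reduces the remaining low bits into a single sticky bit. The following software-only sketch (made-up values and object name, not part of the hardfloat sources) shows the same compression on plain Scala Ints.
object StickyCompressExample extends App {
  val sigWidth = 8                                     // small, made-up width for illustration
  val fullSig  = 0x5A31                                // pretend 2*sigWidth-bit raw product
  val kept     = fullSig >> (sigWidth - 2)             // top bits, preserved exactly
  val sticky   = if ((fullSig & ((1 << (sigWidth - 2)) - 1)) != 0) 1 else 0
  val compact  = (kept << 1) | sticky                  // same as Cat(sig >> (sigWidth-2), sig(sigWidth-3, 0).orR)
  println(f"full=0x$fullSig%04X kept=0x$kept%X sticky=$sticky compact=0x$compact%X")
}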
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
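  // out_d = in_a * in_b + in_c; width and overflow behaviour are whatever the Arithmetic instance's mac defines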
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
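  // In the output-stationary dataflow below, c1 and c2 act as a double buffer: while one of them
  // accumulates partial sums through mac_unit, the other shifts its finished result out through
  // out_c (and is reloaded from in_d); their roles swap whenever the propagate bit flips.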
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
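        // Worked example (illustrative): self = 11 (0b1011), u = 2 gives point_five = 1, zeros = 1,
        // ones_digit = 0, so r = 1 and the shift returns (11 >> 2) + 1 = 3.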
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
        // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
        // TODO this uses a floating point square-root unit, but we should use an integer square root instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
        // Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
        // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
        // Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
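        // For example (illustrative): with expWidth = 8, sigWidth = 24 (an IEEE float32) and u = 3,
        // shift_exp = 127 - 3 = 124, so shift_fn encodes 2^-3 = 0.125.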
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module MacUnit_236( // @[PE.scala:14:7]
input clock, // @[PE.scala:14:7]
input reset, // @[PE.scala:14:7]
input [7:0] io_in_a, // @[PE.scala:16:14]
input [7:0] io_in_b, // @[PE.scala:16:14]
input [31:0] io_in_c, // @[PE.scala:16:14]
output [19:0] io_out_d // @[PE.scala:16:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7]
wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7]
wire [31:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
wire [19:0] io_out_d_0; // @[PE.scala:14:7]
wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
wire [32:0] _io_out_d_T_1 = {{17{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[31], io_in_c_0}; // @[PE.scala:14:7]
wire [31:0] _io_out_d_T_2 = _io_out_d_T_1[31:0]; // @[Arithmetic.scala:93:54]
wire [31:0] _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
assign io_out_d_0 = _io_out_d_T_3[19:0]; // @[PE.scala:14:7, :23:12]
assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule |
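For reference, a tiny software model of what this MAC computes (not part of the gemmini sources; the name and widths below are illustrative and chosen to match the MacUnit_236 ports above): a signed 8x8 multiply, a wrapping 32-bit accumulate, and truncation of the result to the 20-bit output, interpreted as signed.
object MacModel extends App {
  // a and b are assumed to fit in 8 signed bits and c in 32; the low 20 bits of the
  // wrapped 32-bit sum are returned sign-extended, mirroring io_out_d[19:0] above.
  def mac(a: Int, b: Int, c: Int): Int = ((a * b + c) << 12) >> 12
  println(mac(-3, 5, 100))           // prints 85
  println(mac(127, 127, 0x7FFFFFFF)) // the 32-bit accumulate wraps; only the low 20 bits survive
}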
Generate the Verilog code corresponding to the following Chisel files.
File Transposer.scala:
package gemmini
import chisel3._
import chisel3.util._
import Util._
trait Transposer[T <: Data] extends Module {
def dim: Int
def dataType: T
val io = IO(new Bundle {
val inRow = Flipped(Decoupled(Vec(dim, dataType)))
val outCol = Decoupled(Vec(dim, dataType))
})
}
class PipelinedTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
require(isPow2(dim))
val regArray = Seq.fill(dim, dim)(Reg(dataType))
val regArrayT = regArray.transpose
val sMoveUp :: sMoveLeft :: Nil = Enum(2)
val state = RegInit(sMoveUp)
val leftCounter = RegInit(0.U(log2Ceil(dim+1).W)) //(io.inRow.fire && state === sMoveLeft, dim+1)
val upCounter = RegInit(0.U(log2Ceil(dim+1).W)) //Counter(io.inRow.fire && state === sMoveUp, dim+1)
io.outCol.valid := 0.U
io.inRow.ready := 0.U
switch(state) {
is(sMoveUp) {
io.inRow.ready := upCounter <= dim.U
io.outCol.valid := leftCounter > 0.U
when(io.inRow.fire) {
upCounter := upCounter + 1.U
}
when(upCounter === (dim-1).U) {
state := sMoveLeft
leftCounter := 0.U
}
when(io.outCol.fire) {
leftCounter := leftCounter - 1.U
}
}
is(sMoveLeft) {
io.inRow.ready := leftCounter <= dim.U // TODO: this is naive
io.outCol.valid := upCounter > 0.U
when(leftCounter === (dim-1).U) {
state := sMoveUp
}
when(io.inRow.fire) {
leftCounter := leftCounter + 1.U
upCounter := 0.U
}
when(io.outCol.fire) {
upCounter := upCounter - 1.U
}
}
}
// Propagate input from bottom row to top row systolically in the move up phase
// TODO: need to iterate over columns to connect Chisel values of type T
// Should be able to operate directly on the Vec, but Seq and Vec don't mix (try Array?)
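  // The foldRight below chains each column's registers into a shift register: the incoming row
  // enters at the bottom (highest row index) and every register copies its lower neighbour while
  // state === sMoveUp, so the array fills from the bottom up and drains at row 0.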
for (colIdx <- 0 until dim) {
regArray.foldRight(io.inRow.bits(colIdx)) {
case (regRow, prevReg) =>
when (state === sMoveUp) {
regRow(colIdx) := prevReg
}
regRow(colIdx)
}
}
// Propagate input from right side to left side systolically in the move left phase
for (rowIdx <- 0 until dim) {
regArrayT.foldRight(io.inRow.bits(rowIdx)) {
case (regCol, prevReg) =>
when (state === sMoveLeft) {
regCol(rowIdx) := prevReg
}
regCol(rowIdx)
}
}
// Pull from the left side or the top side based on the state
for (idx <- 0 until dim) {
when (state === sMoveUp) {
io.outCol.bits(idx) := regArray(0)(idx)
}.elsewhen(state === sMoveLeft) {
io.outCol.bits(idx) := regArrayT(0)(idx)
}.otherwise {
io.outCol.bits(idx) := DontCare
}
}
}
class AlwaysOutTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
require(isPow2(dim))
val LEFT_DIR = 0.U(1.W)
val UP_DIR = 1.U(1.W)
class PE extends Module {
val io = IO(new Bundle {
val inR = Input(dataType)
val inD = Input(dataType)
val outL = Output(dataType)
val outU = Output(dataType)
val dir = Input(UInt(1.W))
val en = Input(Bool())
})
val reg = RegEnable(Mux(io.dir === LEFT_DIR, io.inR, io.inD), io.en)
io.outU := reg
io.outL := reg
}
val pes = Seq.fill(dim,dim)(Module(new PE))
val counter = RegInit(0.U((log2Ceil(dim) max 1).W)) // TODO replace this with a standard Chisel counter
val dir = RegInit(LEFT_DIR)
// Wire up horizontal signals
for (row <- 0 until dim; col <- 0 until dim) {
val right_in = if (col == dim-1) io.inRow.bits(row) else pes(row)(col+1).io.outL
pes(row)(col).io.inR := right_in
}
// Wire up vertical signals
for (row <- 0 until dim; col <- 0 until dim) {
val down_in = if (row == dim-1) io.inRow.bits(col) else pes(row+1)(col).io.outU
pes(row)(col).io.inD := down_in
}
// Wire up global signals
pes.flatten.foreach(_.io.dir := dir)
pes.flatten.foreach(_.io.en := io.inRow.fire)
io.outCol.valid := true.B
io.inRow.ready := true.B
val left_out = VecInit(pes.transpose.head.map(_.io.outL))
val up_out = VecInit(pes.head.map(_.io.outU))
io.outCol.bits := Mux(dir === LEFT_DIR, left_out, up_out)
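  // The counter below tracks how many rows have been accepted; after every dim rows the shift
  // direction flips, so the matrix loaded while shifting along one axis is drained along the
  // other, and a transposed column is presented on every cycle (hence "AlwaysOut").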
when (io.inRow.fire) {
counter := wrappingAdd(counter, 1.U, dim)
}
when (counter === (dim-1).U && io.inRow.fire) {
dir := ~dir
}
}
class NaiveTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
val regArray = Seq.fill(dim, dim)(Reg(dataType))
val regArrayT = regArray.transpose
// state = 0 => filling regArray row-wise, state = 1 => draining regArray column-wise
val state = RegInit(0.U(1.W))
val countInc = io.inRow.fire || io.outCol.fire
val (countValue, countWrap) = Counter(countInc, dim)
io.inRow.ready := state === 0.U
io.outCol.valid := state === 1.U
for (i <- 0 until dim) {
for (j <- 0 until dim) {
when(countValue === i.U && io.inRow.fire) {
regArray(i)(j) := io.inRow.bits(j)
}
}
}
for (i <- 0 until dim) {
io.outCol.bits(i) := 0.U
for (j <- 0 until dim) {
when(countValue === j.U) {
io.outCol.bits(i) := regArrayT(j)(i)
}
}
}
when (io.inRow.fire && countWrap) {
state := 1.U
}
when (io.outCol.fire && countWrap) {
state := 0.U
}
assert(!(state === 0.U) || !io.outCol.fire)
assert(!(state === 1.U) || !io.inRow.fire)
}
| module PE_128( // @[Transposer.scala:100:9]
input clock, // @[Transposer.scala:100:9]
input reset, // @[Transposer.scala:100:9]
input [7:0] io_inR, // @[Transposer.scala:101:16]
input [7:0] io_inD, // @[Transposer.scala:101:16]
output [7:0] io_outL, // @[Transposer.scala:101:16]
output [7:0] io_outU, // @[Transposer.scala:101:16]
input io_dir, // @[Transposer.scala:101:16]
input io_en // @[Transposer.scala:101:16]
);
wire [7:0] io_inR_0 = io_inR; // @[Transposer.scala:100:9]
wire [7:0] io_inD_0 = io_inD; // @[Transposer.scala:100:9]
wire io_dir_0 = io_dir; // @[Transposer.scala:100:9]
wire io_en_0 = io_en; // @[Transposer.scala:100:9]
wire [7:0] io_outL_0; // @[Transposer.scala:100:9]
wire [7:0] io_outU_0; // @[Transposer.scala:100:9]
wire _reg_T = ~io_dir_0; // @[Transposer.scala:100:9, :110:36]
wire [7:0] _reg_T_1 = _reg_T ? io_inR_0 : io_inD_0; // @[Transposer.scala:100:9, :110:{28,36}]
reg [7:0] reg_0; // @[Transposer.scala:110:24]
assign io_outL_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
assign io_outU_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
always @(posedge clock) begin // @[Transposer.scala:100:9]
if (io_en_0) // @[Transposer.scala:100:9]
reg_0 <= _reg_T_1; // @[Transposer.scala:110:{24,28}]
  end // always @(posedge)
assign io_outL = io_outL_0; // @[Transposer.scala:100:9]
assign io_outU = io_outU_0; // @[Transposer.scala:100:9]
endmodule |
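To make the fill/drain behaviour of the transposers concrete, here is a software-only toy model (illustrative, not part of the sources): rows are buffered during the fill phase and read back column-by-column during the drain phase, which is exactly a matrix transpose.
object NaiveTransposeModel extends App {
  val dim  = 3
  val buf  = Array.ofDim[Int](dim, dim)
  val rows = Seq(Seq(1, 2, 3), Seq(4, 5, 6), Seq(7, 8, 9))
  for ((row, i) <- rows.zipWithIndex; j <- 0 until dim) buf(i)(j) = row(j)   // fill phase (state === 0)
  val cols = for (j <- 0 until dim) yield (0 until dim).map(i => buf(i)(j))  // drain phase (state === 1)
  cols.foreach(c => println(c.mkString(" ")))                                // 1 4 7 / 2 5 8 / 3 6 9
}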
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
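  // An address is "visible" for a given source if every client that could own that source ID
  // declares at least one visibility region containing the address.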
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
      monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
  // This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val a_opcode_bus_size = 3 + 1 // opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
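    // Worked example of this encoding (illustrative; assumes sizeBits = 4): a_size_bus_size = 5 and
    // log_a_size_bus_size = 3, so each source ID owns an 8-bit slot of inflight_sizes. A request of
    // size 2 is stored as (2 << 1) | 1 = 5; the lookup masks the slot with
    // size_to_numfullbits(8.U) = 0xff and shifts right by 1 to recover 2, while a slot value of 0
    // means no request is in flight for that source.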
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val c_opcode_bus_size = 3 + 1 // opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
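// Usage sketch (signal names are illustrative, not from this file): gate a transfer on several
// ready/valid conditions without creating a combinational loop between them.
//   val helper = DecoupledHelper(in.valid, out.ready, bufferFree)
//   in.ready  := helper.fire(in.valid)   // exclude the signal being driven
//   out.valid := helper.fire(out.ready)
//   when (helper.fire()) { /* all conditions hold this cycle */ }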
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
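// Example (illustrative values): look up a pair of control signals by key, with a default.
//   val (isWrite, lgSize) = MuxTLookup(opcode, (false.B, 0.U), Seq(
//     0.U -> (true.B,  2.U),
//     1.U -> (false.B, 3.U)))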
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
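// Usage sketch (the plusarg names and the count signal are hypothetical):
//   val verbose = PlusArg("verbose", default = 0, docstring = "Enable verbose printing")
//   PlusArg.timeout("max_cycles", docstring = "Abort after this many cycles")(cycleCount)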
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
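  // Examples (illustrative): "CamelCase Name".underscore yields "camel_case_name",
  // "Camel Case".kebab yields "camel-case", and "bus".named(Some("fast")) yields "bus_named_fast".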
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
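    // Example (illustrative): with n = 2.S, `x << n` shifts left by 2; with n = -2.S, `x << n`
    // shifts right by 2 and `x >> n` shifts left by 2, so the sign of n selects the direction.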
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
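    // Example (illustrative): for a 3-bit index into a 5-entry ring buffer, idx.addWrap(1.U, 5)
    // steps from 4 back to 0 and idx.subWrap(1.U, 5) steps from 0 back to 4; when n is a power of
    // two, both reduce to simple truncation.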
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
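  // Example (illustrative): groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) returns
  // Seq(1 -> Seq(1, 3), 0 -> Seq(2, 4)), preserving first-seen key order, unlike Seq.groupBy.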
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
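// Worked example (illustrative): for IdRange(4, 8), start ^ (end-1) = 4 ^ 7 = 3, so
// largestDeltaBit = 1 and smallestCommonBit = 2; contains(x) reduces to the prefix check
// (x >> 2) === 1.U, with the low-bit bounds 0 <= x(1,0) <= 3 trivially true (and removed by
// constant propagation), i.e. 4 <= x <= 7.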
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
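// Example (illustrative): TransferSizes(4, 64).contains(16) is true, .contains(3) is false (not a
// power of two), and TransferSizes(4, 64).intersect(TransferSizes(32, 256)) == TransferSizes(32, 64).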
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask selects the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
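// Worked example (illustrative): AddressSet(0x0, 0xfff).subtract(AddressSet(0x0, 0xff)) carves the
// remaining space 0x100-0xfff into power-of-two pieces:
//   Seq(AddressSet(0x100, 0xff), AddressSet(0x200, 0x1ff), AddressSet(0x400, 0x3ff), AddressSet(0x800, 0x7ff))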
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
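// Worked example (illustrative comment, not in the upstream file): with beatBytes = 8,
// a PutFullData of lgSize = 5 (32 bytes) yields numBeats = 4 and numBeats1 = 3, while a
// Get of the same size carries no data on channel A, so numBeats = 1 and numBeats1 = 0.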
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
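// Worked example (illustrative): for a 4-beat message the counter steps 0 -> 3 -> 2 -> 1
// across fired beats, so `first` holds on the initial beat, `last` on the final one, and
// `count` (beats1 & ~counter1) reconstructs the beat index sequence 0, 1, 2, 3.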
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
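// Worked example (illustrative): needT is false for a Get, a PREFETCH_READ Hint, or an
// Acquire asking only for NtoB, and true for any Put, an arithmetic/logical atomic, a
// PREFETCH_WRITE Hint, or an Acquire asking for NtoT/BtoT.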
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
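// Illustrative sketch (not part of the upstream file): the beat-tracking helpers above are
// typically applied to a channel inside a monitor or adapter; `edge` and `tl` are hypothetical.
//   val (a_first, a_last, a_done) = edge.firstlast(tl.a)
//   when (a_done) { /* a complete A message has been accepted */ }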
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
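// Illustrative sketch (not part of the upstream file): a client drives channel A with one of
// the constructors above; `edgeOut`, `out`, `wantRead` and `addr` are hypothetical.
//   val (legal, get) = edgeOut.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
//   out.a.valid := wantRead && legal
//   out.a.bits  := get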
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
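// Illustrative sketch (not part of the upstream file): a manager answers an accepted Get by
// echoing source/size into an AccessAckData on channel D; `edgeIn`, `in`, `reqA`, `respValid`
// and `respData` are hypothetical.
//   in.d.valid := respValid
//   in.d.bits  := edgeIn.AccessAck(reqA, respData)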
| module TLMonitor( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [4:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [4:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input [4:0] io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire [26:0] _GEN = {23'h0, io_in_a_bits_size}; // @[package.scala:243:71]
wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
reg [8:0] a_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [3:0] size; // @[Monitor.scala:389:22]
reg [4:0] source; // @[Monitor.scala:390:22]
reg [31:0] address; // @[Monitor.scala:391:22]
reg [8:0] d_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [3:0] size_1; // @[Monitor.scala:540:22]
reg [4:0] source_1; // @[Monitor.scala:541:22]
reg [4:0] sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [16:0] inflight; // @[Monitor.scala:614:27]
reg [67:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [135:0] inflight_sizes; // @[Monitor.scala:618:33]
reg [8:0] a_first_counter_1; // @[Edges.scala:229:27]
wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
reg [8:0] d_first_counter_1; // @[Edges.scala:229:27]
wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire [31:0] _GEN_0 = {27'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35]
wire _GEN_1 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35]
wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46]
wire _GEN_2 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74]
wire [31:0] _GEN_3 = {27'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
reg [16:0] inflight_1; // @[Monitor.scala:726:35]
reg [135:0] inflight_sizes_1; // @[Monitor.scala:728:35]
reg [8:0] d_first_counter_2; // @[Edges.scala:229:27]
wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File MSHR.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import freechips.rocketchip.tilelink._
import TLPermissions._
import TLMessages._
import MetaData._
import chisel3.PrintableHelper
import chisel3.experimental.dataview._
class ScheduleRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val a = Valid(new SourceARequest(params))
val b = Valid(new SourceBRequest(params))
val c = Valid(new SourceCRequest(params))
val d = Valid(new SourceDRequest(params))
val e = Valid(new SourceERequest(params))
val x = Valid(new SourceXRequest(params))
val dir = Valid(new DirectoryWrite(params))
val reload = Bool() // get next request via allocate (if any)
}
class MSHRStatus(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val way = UInt(params.wayBits.W)
val blockB = Bool()
val nestB = Bool()
val blockC = Bool()
val nestC = Bool()
}
class NestedWriteback(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val b_toN = Bool() // nested Probes may unhit us
val b_toB = Bool() // nested Probes may demote us
val b_clr_dirty = Bool() // nested Probes clear dirty
val c_set_dirty = Bool() // nested Releases MAY set dirty
}
sealed trait CacheState
{
val code = CacheState.index.U
CacheState.index = CacheState.index + 1
}
object CacheState
{
var index = 0
}
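// Illustrative note (not in the upstream file): each CacheState case object below samples the
// shared CacheState.index counter when it is first initialized, so the nine states receive
// distinct codes 0 through 8, which the coverage helpers compare against.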
case object S_INVALID extends CacheState
case object S_BRANCH extends CacheState
case object S_BRANCH_C extends CacheState
case object S_TIP extends CacheState
case object S_TIP_C extends CacheState
case object S_TIP_CD extends CacheState
case object S_TIP_D extends CacheState
case object S_TRUNK_C extends CacheState
case object S_TRUNK_CD extends CacheState
class MSHR(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val allocate = Flipped(Valid(new AllocateRequest(params))) // refills MSHR for next cycle
val directory = Flipped(Valid(new DirectoryResult(params))) // triggers schedule setup
val status = Valid(new MSHRStatus(params))
val schedule = Decoupled(new ScheduleRequest(params))
val sinkc = Flipped(Valid(new SinkCResponse(params)))
val sinkd = Flipped(Valid(new SinkDResponse(params)))
val sinke = Flipped(Valid(new SinkEResponse(params)))
val nestedwb = Flipped(new NestedWriteback(params))
})
val request_valid = RegInit(false.B)
val request = Reg(new FullRequest(params))
val meta_valid = RegInit(false.B)
val meta = Reg(new DirectoryResult(params))
// Define which states are valid
when (meta_valid) {
when (meta.state === INVALID) {
assert (!meta.clients.orR)
assert (!meta.dirty)
}
when (meta.state === BRANCH) {
assert (!meta.dirty)
}
when (meta.state === TRUNK) {
assert (meta.clients.orR)
assert ((meta.clients & (meta.clients - 1.U)) === 0.U) // at most one
}
when (meta.state === TIP) {
// noop
}
}
// Completed transitions (s_ = scheduled), (w_ = waiting)
val s_rprobe = RegInit(true.B) // B
val w_rprobeackfirst = RegInit(true.B)
val w_rprobeacklast = RegInit(true.B)
val s_release = RegInit(true.B) // CW w_rprobeackfirst
val w_releaseack = RegInit(true.B)
val s_pprobe = RegInit(true.B) // B
val s_acquire = RegInit(true.B) // A s_release, s_pprobe [1]
val s_flush = RegInit(true.B) // X w_releaseack
val w_grantfirst = RegInit(true.B)
val w_grantlast = RegInit(true.B)
val w_grant = RegInit(true.B) // first | last depending on wormhole
val w_pprobeackfirst = RegInit(true.B)
val w_pprobeacklast = RegInit(true.B)
val w_pprobeack = RegInit(true.B) // first | last depending on wormhole
val s_probeack = RegInit(true.B) // C w_pprobeackfirst (mutually exclusive with next two s_*)
val s_grantack = RegInit(true.B) // E w_grantfirst ... CAN require both outE&inD to service outD
val s_execute = RegInit(true.B) // D w_pprobeack, w_grant
val w_grantack = RegInit(true.B)
val s_writeback = RegInit(true.B) // W w_*
// [1]: We cannot issue outer Acquire while holding blockB (=> outA can stall)
// However, inB and outC are higher priority than outB, so s_release and s_pprobe
// may be safely issued while blockB. Thus we must NOT try to schedule the
// potentially stuck s_acquire with either of them (scheduler is all or none).
// Meta-data that we discover underway
val sink = Reg(UInt(params.outer.bundle.sinkBits.W))
val gotT = Reg(Bool())
val bad_grant = Reg(Bool())
val probes_done = Reg(UInt(params.clientBits.W))
val probes_toN = Reg(UInt(params.clientBits.W))
val probes_noT = Reg(Bool())
// When a nested transaction completes, update our meta data
when (meta_valid && meta.state =/= INVALID &&
io.nestedwb.set === request.set && io.nestedwb.tag === meta.tag) {
when (io.nestedwb.b_clr_dirty) { meta.dirty := false.B }
when (io.nestedwb.c_set_dirty) { meta.dirty := true.B }
when (io.nestedwb.b_toB) { meta.state := BRANCH }
when (io.nestedwb.b_toN) { meta.hit := false.B }
}
// Scheduler status
io.status.valid := request_valid
io.status.bits.set := request.set
io.status.bits.tag := request.tag
io.status.bits.way := meta.way
io.status.bits.blockB := !meta_valid || ((!w_releaseack || !w_rprobeacklast || !w_pprobeacklast) && !w_grantfirst)
io.status.bits.nestB := meta_valid && w_releaseack && w_rprobeacklast && w_pprobeacklast && !w_grantfirst
// The above rules ensure we will block and not nest an outer probe while still doing our
// own inner probes. Thus every probe wakes exactly one MSHR.
io.status.bits.blockC := !meta_valid
io.status.bits.nestC := meta_valid && (!w_rprobeackfirst || !w_pprobeackfirst || !w_grantfirst)
// The w_grantfirst in nestC is necessary to deal with:
// acquire waiting for grant, inner release gets queued, outer probe -> inner probe -> deadlock
// ... this is possible because the release+probe can be for same set, but different tag
// We can only demand: block, nest, or queue
assert (!io.status.bits.nestB || !io.status.bits.blockB)
assert (!io.status.bits.nestC || !io.status.bits.blockC)
// Scheduler requests
val no_wait = w_rprobeacklast && w_releaseack && w_grantlast && w_pprobeacklast && w_grantack
io.schedule.bits.a.valid := !s_acquire && s_release && s_pprobe
io.schedule.bits.b.valid := !s_rprobe || !s_pprobe
io.schedule.bits.c.valid := (!s_release && w_rprobeackfirst) || (!s_probeack && w_pprobeackfirst)
io.schedule.bits.d.valid := !s_execute && w_pprobeack && w_grant
io.schedule.bits.e.valid := !s_grantack && w_grantfirst
io.schedule.bits.x.valid := !s_flush && w_releaseack
io.schedule.bits.dir.valid := (!s_release && w_rprobeackfirst) || (!s_writeback && no_wait)
io.schedule.bits.reload := no_wait
io.schedule.valid := io.schedule.bits.a.valid || io.schedule.bits.b.valid || io.schedule.bits.c.valid ||
io.schedule.bits.d.valid || io.schedule.bits.e.valid || io.schedule.bits.x.valid ||
io.schedule.bits.dir.valid
// Schedule completions
when (io.schedule.ready) {
s_rprobe := true.B
when (w_rprobeackfirst) { s_release := true.B }
s_pprobe := true.B
when (s_release && s_pprobe) { s_acquire := true.B }
when (w_releaseack) { s_flush := true.B }
when (w_pprobeackfirst) { s_probeack := true.B }
when (w_grantfirst) { s_grantack := true.B }
when (w_pprobeack && w_grant) { s_execute := true.B }
when (no_wait) { s_writeback := true.B }
// Await the next operation
when (no_wait) {
request_valid := false.B
meta_valid := false.B
}
}
// Resulting meta-data
val final_meta_writeback = WireInit(meta)
val req_clientBit = params.clientBit(request.source)
val req_needT = needT(request.opcode, request.param)
val req_acquire = request.opcode === AcquireBlock || request.opcode === AcquirePerm
val meta_no_clients = !meta.clients.orR
val req_promoteT = req_acquire && Mux(meta.hit, meta_no_clients && meta.state === TIP, gotT)
when (request.prio(2) && (!params.firstLevel).B) { // always a hit
final_meta_writeback.dirty := meta.dirty || request.opcode(0)
final_meta_writeback.state := Mux(request.param =/= TtoT && meta.state === TRUNK, TIP, meta.state)
final_meta_writeback.clients := meta.clients & ~Mux(isToN(request.param), req_clientBit, 0.U)
final_meta_writeback.hit := true.B // chained requests are hits
} .elsewhen (request.control && params.control.B) { // request.prio(0)
when (meta.hit) {
final_meta_writeback.dirty := false.B
final_meta_writeback.state := INVALID
final_meta_writeback.clients := meta.clients & ~probes_toN
}
final_meta_writeback.hit := false.B
} .otherwise {
final_meta_writeback.dirty := (meta.hit && meta.dirty) || !request.opcode(2)
final_meta_writeback.state := Mux(req_needT,
Mux(req_acquire, TRUNK, TIP),
Mux(!meta.hit, Mux(gotT, Mux(req_acquire, TRUNK, TIP), BRANCH),
MuxLookup(meta.state, 0.U(2.W))(Seq(
INVALID -> BRANCH,
BRANCH -> BRANCH,
TRUNK -> TIP,
TIP -> Mux(meta_no_clients && req_acquire, TRUNK, TIP)))))
final_meta_writeback.clients := Mux(meta.hit, meta.clients & ~probes_toN, 0.U) |
Mux(req_acquire, req_clientBit, 0.U)
final_meta_writeback.tag := request.tag
final_meta_writeback.hit := true.B
}
when (bad_grant) {
when (meta.hit) {
// upgrade failed (B -> T)
assert (!meta_valid || meta.state === BRANCH)
final_meta_writeback.hit := true.B
final_meta_writeback.dirty := false.B
final_meta_writeback.state := BRANCH
final_meta_writeback.clients := meta.clients & ~probes_toN
} .otherwise {
// failed N -> (T or B)
final_meta_writeback.hit := false.B
final_meta_writeback.dirty := false.B
final_meta_writeback.state := INVALID
final_meta_writeback.clients := 0.U
}
}
val invalid = Wire(new DirectoryEntry(params))
invalid.dirty := false.B
invalid.state := INVALID
invalid.clients := 0.U
invalid.tag := 0.U
// Just because a client says BtoT, by the time we process the request he may be N.
// Therefore, we must consult our own meta-data state to confirm he owns the line still.
val honour_BtoT = meta.hit && (meta.clients & req_clientBit).orR
// The client asking us to act is proof they don't have permissions.
val excluded_client = Mux(meta.hit && request.prio(0) && skipProbeN(request.opcode, params.cache.hintsSkipProbe), req_clientBit, 0.U)
io.schedule.bits.a.bits.tag := request.tag
io.schedule.bits.a.bits.set := request.set
io.schedule.bits.a.bits.param := Mux(req_needT, Mux(meta.hit, BtoT, NtoT), NtoB)
io.schedule.bits.a.bits.block := request.size =/= log2Ceil(params.cache.blockBytes).U ||
!(request.opcode === PutFullData || request.opcode === AcquirePerm)
io.schedule.bits.a.bits.source := 0.U
io.schedule.bits.b.bits.param := Mux(!s_rprobe, toN, Mux(request.prio(1), request.param, Mux(req_needT, toN, toB)))
io.schedule.bits.b.bits.tag := Mux(!s_rprobe, meta.tag, request.tag)
io.schedule.bits.b.bits.set := request.set
io.schedule.bits.b.bits.clients := meta.clients & ~excluded_client
io.schedule.bits.c.bits.opcode := Mux(meta.dirty, ReleaseData, Release)
io.schedule.bits.c.bits.param := Mux(meta.state === BRANCH, BtoN, TtoN)
io.schedule.bits.c.bits.source := 0.U
io.schedule.bits.c.bits.tag := meta.tag
io.schedule.bits.c.bits.set := request.set
io.schedule.bits.c.bits.way := meta.way
io.schedule.bits.c.bits.dirty := meta.dirty
io.schedule.bits.d.bits.viewAsSupertype(chiselTypeOf(request)) := request
io.schedule.bits.d.bits.param := Mux(!req_acquire, request.param,
MuxLookup(request.param, request.param)(Seq(
NtoB -> Mux(req_promoteT, NtoT, NtoB),
BtoT -> Mux(honour_BtoT, BtoT, NtoT),
NtoT -> NtoT)))
io.schedule.bits.d.bits.sink := 0.U
io.schedule.bits.d.bits.way := meta.way
io.schedule.bits.d.bits.bad := bad_grant
io.schedule.bits.e.bits.sink := sink
io.schedule.bits.x.bits.fail := false.B
io.schedule.bits.dir.bits.set := request.set
io.schedule.bits.dir.bits.way := meta.way
io.schedule.bits.dir.bits.data := Mux(!s_release, invalid, WireInit(new DirectoryEntry(params), init = final_meta_writeback))
// Coverage of state transitions
def cacheState(entry: DirectoryEntry, hit: Bool) = {
val out = WireDefault(0.U)
val c = entry.clients.orR
val d = entry.dirty
switch (entry.state) {
is (BRANCH) { out := Mux(c, S_BRANCH_C.code, S_BRANCH.code) }
is (TRUNK) { out := Mux(d, S_TRUNK_CD.code, S_TRUNK_C.code) }
is (TIP) { out := Mux(c, Mux(d, S_TIP_CD.code, S_TIP_C.code), Mux(d, S_TIP_D.code, S_TIP.code)) }
is (INVALID) { out := S_INVALID.code }
}
when (!hit) { out := S_INVALID.code }
out
}
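// Worked example (illustrative): cacheState(meta, meta.hit) maps a directory entry onto one of
// the nine coverage states, e.g. a hit on a dirty TIP line with clients yields S_TIP_CD, while
// any miss collapses to S_INVALID.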
val p = !params.lastLevel // can be probed
val c = !params.firstLevel // can be acquired
val m = params.inner.client.clients.exists(!_.supports.probe) // can be written (or read)
val r = params.outer.manager.managers.exists(!_.alwaysGrantsT) // read-only devices exist
val f = params.control // flush control register exists
val cfg = (p, c, m, r, f)
val b = r || p // can reach branch state (via probe downgrade or read-only device)
// The cache must be used for something or we would not be here
require(c || m)
val evict = cacheState(meta, !meta.hit)
val before = cacheState(meta, meta.hit)
val after = cacheState(final_meta_writeback, true.B)
def eviction(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo): Unit = {
if (cover) {
params.ccover(evict === from.code, s"MSHR_${from}_EVICT", s"State transition from ${from} to evicted ${cfg}")
} else {
assert(!(evict === from.code), cf"State transition from ${from} to evicted should be impossible ${cfg}")
}
if (cover && f) {
params.ccover(before === from.code, s"MSHR_${from}_FLUSH", s"State transition from ${from} to flushed ${cfg}")
} else {
assert(!(before === from.code), cf"State transition from ${from} to flushed should be impossible ${cfg}")
}
}
def transition(from: CacheState, to: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo): Unit = {
if (cover) {
params.ccover(before === from.code && after === to.code, s"MSHR_${from}_${to}", s"State transition from ${from} to ${to} ${cfg}")
} else {
assert(!(before === from.code && after === to.code), cf"State transition from ${from} to ${to} should be impossible ${cfg}")
}
}
when ((!s_release && w_rprobeackfirst) && io.schedule.ready) {
eviction(S_BRANCH, b) // MMIO read to read-only device
eviction(S_BRANCH_C, b && c) // you need children to become C
eviction(S_TIP, true) // MMIO read || clean release can lead to this state
eviction(S_TIP_C, c) // needs two clients || client + mmio || downgrading client
eviction(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client
eviction(S_TIP_D, true) // MMIO write || dirty release lead here
eviction(S_TRUNK_C, c) // acquire for write
eviction(S_TRUNK_CD, c) // dirty release then reacquire
}
when ((!s_writeback && no_wait) && io.schedule.ready) {
transition(S_INVALID, S_BRANCH, b && m) // only MMIO can bring us to BRANCH state
transition(S_INVALID, S_BRANCH_C, b && c) // C state is only possible if there are inner caches
transition(S_INVALID, S_TIP, m) // MMIO read
transition(S_INVALID, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_INVALID, S_TIP_CD, false) // acquire does not cause dirty immediately
transition(S_INVALID, S_TIP_D, m) // MMIO write
transition(S_INVALID, S_TRUNK_C, c) // acquire
transition(S_INVALID, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH, S_INVALID, b && p) // probe can do this (flushes run as evictions)
transition(S_BRANCH, S_BRANCH_C, b && c) // acquire
transition(S_BRANCH, S_TIP, b && m) // prefetch write
transition(S_BRANCH, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_BRANCH, S_TIP_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH, S_TIP_D, b && m) // MMIO write
transition(S_BRANCH, S_TRUNK_C, b && c) // acquire
transition(S_BRANCH, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH_C, S_INVALID, b && c && p)
transition(S_BRANCH_C, S_BRANCH, b && c) // clean release (optional)
transition(S_BRANCH_C, S_TIP, b && c && m) // prefetch write
transition(S_BRANCH_C, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_BRANCH_C, S_TIP_D, b && c && m) // MMIO write
transition(S_BRANCH_C, S_TIP_CD, false) // going dirty means we must shoot down clients
transition(S_BRANCH_C, S_TRUNK_C, b && c) // acquire
transition(S_BRANCH_C, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_TIP, S_INVALID, p)
transition(S_TIP, S_BRANCH, p) // losing TIP only possible via probe
transition(S_TIP, S_BRANCH_C, false) // we would go S_TRUNK_C instead
transition(S_TIP, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP, S_TIP_D, m) // direct dirty only via MMIO write
transition(S_TIP, S_TIP_CD, false) // acquire does not make us dirty immediately
transition(S_TIP, S_TRUNK_C, c) // acquire
transition(S_TIP, S_TRUNK_CD, false) // acquire does not make us dirty immediately
transition(S_TIP_C, S_INVALID, c && p)
transition(S_TIP_C, S_BRANCH, c && p) // losing TIP only possible via probe
transition(S_TIP_C, S_BRANCH_C, c && p) // losing TIP only possible via probe
transition(S_TIP_C, S_TIP, c) // probed while MMIO read || clean release (optional)
transition(S_TIP_C, S_TIP_D, c && m) // direct dirty only via MMIO write
transition(S_TIP_C, S_TIP_CD, false) // going dirty means we must shoot down clients
transition(S_TIP_C, S_TRUNK_C, c) // acquire
transition(S_TIP_C, S_TRUNK_CD, false) // acquire does not make us immediately dirty
transition(S_TIP_D, S_INVALID, p)
transition(S_TIP_D, S_BRANCH, p) // losing D is only possible via probe
transition(S_TIP_D, S_BRANCH_C, p && c) // probed while acquire shared
transition(S_TIP_D, S_TIP, p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TIP_D, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP_D, S_TIP_CD, false) // we would go S_TRUNK_CD instead
transition(S_TIP_D, S_TRUNK_C, p && c) // probed while acquired
transition(S_TIP_D, S_TRUNK_CD, c) // acquire
transition(S_TIP_CD, S_INVALID, c && p)
transition(S_TIP_CD, S_BRANCH, c && p) // losing D is only possible via probe
transition(S_TIP_CD, S_BRANCH_C, c && p) // losing D is only possible via probe
transition(S_TIP_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TIP_CD, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP_CD, S_TIP_D, c) // MMIO write || clean release (optional)
transition(S_TIP_CD, S_TRUNK_C, c && p) // probed while acquire
transition(S_TIP_CD, S_TRUNK_CD, c) // acquire
transition(S_TRUNK_C, S_INVALID, c && p)
transition(S_TRUNK_C, S_BRANCH, c && p) // losing TIP only possible via probe
transition(S_TRUNK_C, S_BRANCH_C, c && p) // losing TIP only possible via probe
transition(S_TRUNK_C, S_TIP, c) // MMIO read || clean release (optional)
transition(S_TRUNK_C, S_TIP_C, c) // bounce shared
transition(S_TRUNK_C, S_TIP_D, c) // dirty release
transition(S_TRUNK_C, S_TIP_CD, c) // dirty bounce shared
transition(S_TRUNK_C, S_TRUNK_CD, c) // dirty bounce
transition(S_TRUNK_CD, S_INVALID, c && p)
transition(S_TRUNK_CD, S_BRANCH, c && p) // losing D only possible via probe
transition(S_TRUNK_CD, S_BRANCH_C, c && p) // losing D only possible via probe
transition(S_TRUNK_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TRUNK_CD, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TRUNK_CD, S_TIP_D, c) // dirty release
transition(S_TRUNK_CD, S_TIP_CD, c) // bounce shared
transition(S_TRUNK_CD, S_TRUNK_C, c && p) // probed while acquire
}
// Handle response messages
val probe_bit = params.clientBit(io.sinkc.bits.source)
val last_probe = (probes_done | probe_bit) === (meta.clients & ~excluded_client)
val probe_toN = isToN(io.sinkc.bits.param)
if (!params.firstLevel) when (io.sinkc.valid) {
params.ccover( probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_FULL", "Client downgraded to N when asked only to do B")
params.ccover(!probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_HALF", "Client downgraded to B when asked only to do B")
// Caution: the probe matches us only in set.
// We would never allow an outer probe to nest until both w_[rp]probeack complete, so
// it is safe to just unguardedly update the probe FSM.
probes_done := probes_done | probe_bit
probes_toN := probes_toN | Mux(probe_toN, probe_bit, 0.U)
probes_noT := probes_noT || io.sinkc.bits.param =/= TtoT
w_rprobeackfirst := w_rprobeackfirst || last_probe
w_rprobeacklast := w_rprobeacklast || (last_probe && io.sinkc.bits.last)
w_pprobeackfirst := w_pprobeackfirst || last_probe
w_pprobeacklast := w_pprobeacklast || (last_probe && io.sinkc.bits.last)
// Allow wormhole routing from sinkC if the first request beat has offset 0
val set_pprobeack = last_probe && (io.sinkc.bits.last || request.offset === 0.U)
w_pprobeack := w_pprobeack || set_pprobeack
params.ccover(!set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_SERIAL", "Sequential routing of probe response data")
params.ccover( set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_WORMHOLE", "Wormhole routing of probe response data")
// However, meta-data updates need to be done more cautiously
when (meta.state =/= INVALID && io.sinkc.bits.tag === meta.tag && io.sinkc.bits.data) { meta.dirty := true.B } // !!!
}
when (io.sinkd.valid) {
when (io.sinkd.bits.opcode === Grant || io.sinkd.bits.opcode === GrantData) {
sink := io.sinkd.bits.sink
w_grantfirst := true.B
w_grantlast := io.sinkd.bits.last
// Record if we need to prevent taking ownership
bad_grant := io.sinkd.bits.denied
// Allow wormhole routing for requests whose first beat has offset 0
w_grant := request.offset === 0.U || io.sinkd.bits.last
params.ccover(io.sinkd.bits.opcode === GrantData && request.offset === 0.U, "MSHR_GRANT_WORMHOLE", "Wormhole routing of grant response data")
params.ccover(io.sinkd.bits.opcode === GrantData && request.offset =/= 0.U, "MSHR_GRANT_SERIAL", "Sequential routing of grant response data")
gotT := io.sinkd.bits.param === toT
}
.elsewhen (io.sinkd.bits.opcode === ReleaseAck) {
w_releaseack := true.B
}
}
when (io.sinke.valid) {
w_grantack := true.B
}
// Bootstrap new requests
val allocate_as_full = WireInit(new FullRequest(params), init = io.allocate.bits)
val new_meta = Mux(io.allocate.valid && io.allocate.bits.repeat, final_meta_writeback, io.directory.bits)
val new_request = Mux(io.allocate.valid, allocate_as_full, request)
val new_needT = needT(new_request.opcode, new_request.param)
val new_clientBit = params.clientBit(new_request.source)
val new_skipProbe = Mux(skipProbeN(new_request.opcode, params.cache.hintsSkipProbe), new_clientBit, 0.U)
val prior = cacheState(final_meta_writeback, true.B)
def bypass(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo): Unit = {
if (cover) {
params.ccover(prior === from.code, s"MSHR_${from}_BYPASS", s"State bypass transition from ${from} ${cfg}")
} else {
assert(!(prior === from.code), cf"State bypass from ${from} should be impossible ${cfg}")
}
}
when (io.allocate.valid && io.allocate.bits.repeat) {
bypass(S_INVALID, f || p) // Can lose permissions (probe/flush)
bypass(S_BRANCH, b) // MMIO read to read-only device
bypass(S_BRANCH_C, b && c) // you need children to become C
bypass(S_TIP, true) // MMIO read || clean release can lead to this state
bypass(S_TIP_C, c) // needs two clients || client + mmio || downgrading client
bypass(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client
bypass(S_TIP_D, true) // MMIO write || dirty release lead here
bypass(S_TRUNK_C, c) // acquire for write
bypass(S_TRUNK_CD, c) // dirty release then reacquire
}
when (io.allocate.valid) {
assert (!request_valid || (no_wait && io.schedule.fire))
request_valid := true.B
request := io.allocate.bits
}
// Create execution plan
when (io.directory.valid || (io.allocate.valid && io.allocate.bits.repeat)) {
meta_valid := true.B
meta := new_meta
probes_done := 0.U
probes_toN := 0.U
probes_noT := false.B
gotT := false.B
bad_grant := false.B
// These should already be either true or turning true
// We clear them here explicitly to simplify the mux tree
s_rprobe := true.B
w_rprobeackfirst := true.B
w_rprobeacklast := true.B
s_release := true.B
w_releaseack := true.B
s_pprobe := true.B
s_acquire := true.B
s_flush := true.B
w_grantfirst := true.B
w_grantlast := true.B
w_grant := true.B
w_pprobeackfirst := true.B
w_pprobeacklast := true.B
w_pprobeack := true.B
s_probeack := true.B
s_grantack := true.B
s_execute := true.B
w_grantack := true.B
s_writeback := true.B
// For C channel requests (ie: Release[Data])
when (new_request.prio(2) && (!params.firstLevel).B) {
s_execute := false.B
// Do we need to go dirty?
when (new_request.opcode(0) && !new_meta.dirty) {
s_writeback := false.B
}
// Does our state change?
when (isToB(new_request.param) && new_meta.state === TRUNK) {
s_writeback := false.B
}
// Do our clients change?
when (isToN(new_request.param) && (new_meta.clients & new_clientBit) =/= 0.U) {
s_writeback := false.B
}
assert (new_meta.hit)
}
// For X channel requests (ie: flush)
.elsewhen (new_request.control && params.control.B) { // new_request.prio(0)
s_flush := false.B
// Do we need to actually do something?
when (new_meta.hit) {
s_release := false.B
w_releaseack := false.B
// Do we need to shoot-down inner caches?
when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) {
s_rprobe := false.B
w_rprobeackfirst := false.B
w_rprobeacklast := false.B
}
}
}
// For A channel requests
.otherwise { // new_request.prio(0) && !new_request.control
s_execute := false.B
// Do we need an eviction?
when (!new_meta.hit && new_meta.state =/= INVALID) {
s_release := false.B
w_releaseack := false.B
// Do we need to shoot-down inner caches?
when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) {
s_rprobe := false.B
w_rprobeackfirst := false.B
w_rprobeacklast := false.B
}
}
// Do we need an acquire?
when (!new_meta.hit || (new_meta.state === BRANCH && new_needT)) {
s_acquire := false.B
w_grantfirst := false.B
w_grantlast := false.B
w_grant := false.B
s_grantack := false.B
s_writeback := false.B
}
// Do we need a probe?
when ((!params.firstLevel).B && (new_meta.hit &&
(new_needT || new_meta.state === TRUNK) &&
(new_meta.clients & ~new_skipProbe) =/= 0.U)) {
s_pprobe := false.B
w_pprobeackfirst := false.B
w_pprobeacklast := false.B
w_pprobeack := false.B
s_writeback := false.B
}
// Do we need a grantack?
when (new_request.opcode === AcquireBlock || new_request.opcode === AcquirePerm) {
w_grantack := false.B
s_writeback := false.B
}
// Becomes dirty?
when (!new_request.opcode(2) && new_meta.hit && !new_meta.dirty) {
s_writeback := false.B
}
}
}
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A half-open range of ids [start, end); may be empty (start == end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
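// Worked example of the prefix trick in contains(x: UInt) above (illustrative only):
// IdRange(5, 7).contains(x) has start = 0b101 and end-1 = 0b110, so the largest differing
// bit is bit 1 and the shared prefix is x >> 2. The generated check amounts to
//   (x >> 2) === 1.U && 1.U <= x(1, 0) && x(1, 0) <= 2.U
// which accepts exactly ids 5 and 6.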
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
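// Example (illustrative): TransferSizes(4, 4) mincover TransferSizes(16, 16) yields
// TransferSizes(4, 16), which also admits 8-byte transfers even though neither operand
// does; it is the smallest covering range, not a union.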
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
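// Example (illustrative): AddressSet(0x0, 0xff) subtract AddressSet(0x40, 0x3f) carves the
// 64-byte block 0x40-0x7f out of the 256-byte region, returning
//   Seq(AddressSet(0x0, 0x3f), AddressSet(0x80, 0x7f))
// i.e. the ranges 0x00-0x3f and 0x80-0xff that remain.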
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
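// Typical use (illustrative): given a DecoupledIO source `in`, BufferParams.default(in)
// inserts a 2-entry Queue, BufferParams.flow(in) adds a single flow-through entry with zero
// added latency, and BufferParams.none(in) returns `in` unchanged.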
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // distinct from/to ranges: the format string takes both ranges (arity 7); a subclass that reuses the same reference for both gets the arity-5 form below
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
| module MSHR_1( // @[MSHR.scala:84:7]
input clock, // @[MSHR.scala:84:7]
input reset, // @[MSHR.scala:84:7]
input io_allocate_valid, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_0, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_1, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_2, // @[MSHR.scala:86:14]
input io_allocate_bits_control, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_opcode, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_param, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_size, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_source, // @[MSHR.scala:86:14]
input [12:0] io_allocate_bits_tag, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_offset, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_put, // @[MSHR.scala:86:14]
input [9:0] io_allocate_bits_set, // @[MSHR.scala:86:14]
input io_allocate_bits_repeat, // @[MSHR.scala:86:14]
input io_directory_valid, // @[MSHR.scala:86:14]
input io_directory_bits_dirty, // @[MSHR.scala:86:14]
input [1:0] io_directory_bits_state, // @[MSHR.scala:86:14]
input io_directory_bits_clients, // @[MSHR.scala:86:14]
input [12:0] io_directory_bits_tag, // @[MSHR.scala:86:14]
input io_directory_bits_hit, // @[MSHR.scala:86:14]
input [2:0] io_directory_bits_way, // @[MSHR.scala:86:14]
output io_status_valid, // @[MSHR.scala:86:14]
output [9:0] io_status_bits_set, // @[MSHR.scala:86:14]
output [12:0] io_status_bits_tag, // @[MSHR.scala:86:14]
output [2:0] io_status_bits_way, // @[MSHR.scala:86:14]
output io_status_bits_blockB, // @[MSHR.scala:86:14]
output io_status_bits_nestB, // @[MSHR.scala:86:14]
output io_status_bits_blockC, // @[MSHR.scala:86:14]
output io_status_bits_nestC, // @[MSHR.scala:86:14]
input io_schedule_ready, // @[MSHR.scala:86:14]
output io_schedule_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_a_valid, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_a_bits_tag, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_a_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_a_bits_param, // @[MSHR.scala:86:14]
output io_schedule_bits_a_bits_block, // @[MSHR.scala:86:14]
output io_schedule_bits_b_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_b_bits_param, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_b_bits_tag, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_b_bits_set, // @[MSHR.scala:86:14]
output io_schedule_bits_b_bits_clients, // @[MSHR.scala:86:14]
output io_schedule_bits_c_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_opcode, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_param, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_c_bits_tag, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_c_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_c_bits_dirty, // @[MSHR.scala:86:14]
output io_schedule_bits_d_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_0, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_1, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_2, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_control, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_opcode, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_param, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_size, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_source, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_d_bits_tag, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_offset, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_put, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_d_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_bad, // @[MSHR.scala:86:14]
output io_schedule_bits_e_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_e_bits_sink, // @[MSHR.scala:86:14]
output io_schedule_bits_x_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_valid, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_dir_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_dir_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_bits_data_dirty, // @[MSHR.scala:86:14]
output [1:0] io_schedule_bits_dir_bits_data_state, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_bits_data_clients, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_dir_bits_data_tag, // @[MSHR.scala:86:14]
output io_schedule_bits_reload, // @[MSHR.scala:86:14]
input io_sinkc_valid, // @[MSHR.scala:86:14]
input io_sinkc_bits_last, // @[MSHR.scala:86:14]
input [9:0] io_sinkc_bits_set, // @[MSHR.scala:86:14]
input [12:0] io_sinkc_bits_tag, // @[MSHR.scala:86:14]
input [5:0] io_sinkc_bits_source, // @[MSHR.scala:86:14]
input [2:0] io_sinkc_bits_param, // @[MSHR.scala:86:14]
input io_sinkc_bits_data, // @[MSHR.scala:86:14]
input io_sinkd_valid, // @[MSHR.scala:86:14]
input io_sinkd_bits_last, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_opcode, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_param, // @[MSHR.scala:86:14]
input [3:0] io_sinkd_bits_source, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_sink, // @[MSHR.scala:86:14]
input io_sinkd_bits_denied, // @[MSHR.scala:86:14]
input io_sinke_valid, // @[MSHR.scala:86:14]
input [3:0] io_sinke_bits_sink, // @[MSHR.scala:86:14]
input [9:0] io_nestedwb_set, // @[MSHR.scala:86:14]
input [12:0] io_nestedwb_tag, // @[MSHR.scala:86:14]
input io_nestedwb_b_toN, // @[MSHR.scala:86:14]
input io_nestedwb_b_toB, // @[MSHR.scala:86:14]
input io_nestedwb_b_clr_dirty, // @[MSHR.scala:86:14]
input io_nestedwb_c_set_dirty // @[MSHR.scala:86:14]
);
wire [12:0] final_meta_writeback_tag; // @[MSHR.scala:215:38]
wire final_meta_writeback_clients; // @[MSHR.scala:215:38]
wire [1:0] final_meta_writeback_state; // @[MSHR.scala:215:38]
wire final_meta_writeback_dirty; // @[MSHR.scala:215:38]
wire io_allocate_valid_0 = io_allocate_valid; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_0_0 = io_allocate_bits_prio_0; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_1_0 = io_allocate_bits_prio_1; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_2_0 = io_allocate_bits_prio_2; // @[MSHR.scala:84:7]
wire io_allocate_bits_control_0 = io_allocate_bits_control; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_opcode_0 = io_allocate_bits_opcode; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_param_0 = io_allocate_bits_param; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_size_0 = io_allocate_bits_size; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_source_0 = io_allocate_bits_source; // @[MSHR.scala:84:7]
wire [12:0] io_allocate_bits_tag_0 = io_allocate_bits_tag; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_offset_0 = io_allocate_bits_offset; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_put_0 = io_allocate_bits_put; // @[MSHR.scala:84:7]
wire [9:0] io_allocate_bits_set_0 = io_allocate_bits_set; // @[MSHR.scala:84:7]
wire io_allocate_bits_repeat_0 = io_allocate_bits_repeat; // @[MSHR.scala:84:7]
wire io_directory_valid_0 = io_directory_valid; // @[MSHR.scala:84:7]
wire io_directory_bits_dirty_0 = io_directory_bits_dirty; // @[MSHR.scala:84:7]
wire [1:0] io_directory_bits_state_0 = io_directory_bits_state; // @[MSHR.scala:84:7]
wire io_directory_bits_clients_0 = io_directory_bits_clients; // @[MSHR.scala:84:7]
wire [12:0] io_directory_bits_tag_0 = io_directory_bits_tag; // @[MSHR.scala:84:7]
wire io_directory_bits_hit_0 = io_directory_bits_hit; // @[MSHR.scala:84:7]
wire [2:0] io_directory_bits_way_0 = io_directory_bits_way; // @[MSHR.scala:84:7]
wire io_schedule_ready_0 = io_schedule_ready; // @[MSHR.scala:84:7]
wire io_sinkc_valid_0 = io_sinkc_valid; // @[MSHR.scala:84:7]
wire io_sinkc_bits_last_0 = io_sinkc_bits_last; // @[MSHR.scala:84:7]
wire [9:0] io_sinkc_bits_set_0 = io_sinkc_bits_set; // @[MSHR.scala:84:7]
wire [12:0] io_sinkc_bits_tag_0 = io_sinkc_bits_tag; // @[MSHR.scala:84:7]
wire [5:0] io_sinkc_bits_source_0 = io_sinkc_bits_source; // @[MSHR.scala:84:7]
wire [2:0] io_sinkc_bits_param_0 = io_sinkc_bits_param; // @[MSHR.scala:84:7]
wire io_sinkc_bits_data_0 = io_sinkc_bits_data; // @[MSHR.scala:84:7]
wire io_sinkd_valid_0 = io_sinkd_valid; // @[MSHR.scala:84:7]
wire io_sinkd_bits_last_0 = io_sinkd_bits_last; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_opcode_0 = io_sinkd_bits_opcode; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_param_0 = io_sinkd_bits_param; // @[MSHR.scala:84:7]
wire [3:0] io_sinkd_bits_source_0 = io_sinkd_bits_source; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_sink_0 = io_sinkd_bits_sink; // @[MSHR.scala:84:7]
wire io_sinkd_bits_denied_0 = io_sinkd_bits_denied; // @[MSHR.scala:84:7]
wire io_sinke_valid_0 = io_sinke_valid; // @[MSHR.scala:84:7]
wire [3:0] io_sinke_bits_sink_0 = io_sinke_bits_sink; // @[MSHR.scala:84:7]
wire [9:0] io_nestedwb_set_0 = io_nestedwb_set; // @[MSHR.scala:84:7]
wire [12:0] io_nestedwb_tag_0 = io_nestedwb_tag; // @[MSHR.scala:84:7]
wire io_nestedwb_b_toN_0 = io_nestedwb_b_toN; // @[MSHR.scala:84:7]
wire io_nestedwb_b_toB_0 = io_nestedwb_b_toB; // @[MSHR.scala:84:7]
wire io_nestedwb_b_clr_dirty_0 = io_nestedwb_b_clr_dirty; // @[MSHR.scala:84:7]
wire io_nestedwb_c_set_dirty_0 = io_nestedwb_c_set_dirty; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_a_bits_source = 4'h0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_c_bits_source = 4'h0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_d_bits_sink = 4'h0; // @[MSHR.scala:84:7]
wire io_schedule_bits_x_bits_fail = 1'h0; // @[MSHR.scala:84:7]
wire _io_schedule_bits_c_valid_T_2 = 1'h0; // @[MSHR.scala:186:68]
wire _io_schedule_bits_c_valid_T_3 = 1'h0; // @[MSHR.scala:186:80]
wire invalid_dirty = 1'h0; // @[MSHR.scala:268:21]
wire invalid_clients = 1'h0; // @[MSHR.scala:268:21]
wire _excluded_client_T_7 = 1'h0; // @[Parameters.scala:279:137]
wire _after_T_4 = 1'h0; // @[MSHR.scala:323:11]
wire _new_skipProbe_T_6 = 1'h0; // @[Parameters.scala:279:137]
wire _prior_T_4 = 1'h0; // @[MSHR.scala:323:11]
wire _req_clientBit_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _probe_bit_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _new_clientBit_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire [12:0] invalid_tag = 13'h0; // @[MSHR.scala:268:21]
wire [1:0] invalid_state = 2'h0; // @[MSHR.scala:268:21]
wire [1:0] _final_meta_writeback_state_T_11 = 2'h1; // @[MSHR.scala:240:70]
wire allocate_as_full_prio_0 = io_allocate_bits_prio_0_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_prio_1 = io_allocate_bits_prio_1_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_prio_2 = io_allocate_bits_prio_2_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_control = io_allocate_bits_control_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_opcode = io_allocate_bits_opcode_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_param = io_allocate_bits_param_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_size = io_allocate_bits_size_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_source = io_allocate_bits_source_0; // @[MSHR.scala:84:7, :504:34]
wire [12:0] allocate_as_full_tag = io_allocate_bits_tag_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_offset = io_allocate_bits_offset_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_put = io_allocate_bits_put_0; // @[MSHR.scala:84:7, :504:34]
wire [9:0] allocate_as_full_set = io_allocate_bits_set_0; // @[MSHR.scala:84:7, :504:34]
wire _io_status_bits_blockB_T_8; // @[MSHR.scala:168:40]
wire _io_status_bits_nestB_T_4; // @[MSHR.scala:169:93]
wire _io_status_bits_blockC_T; // @[MSHR.scala:172:28]
wire _io_status_bits_nestC_T_5; // @[MSHR.scala:173:39]
wire _io_schedule_valid_T_5; // @[MSHR.scala:193:105]
wire _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:184:55]
wire _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:283:91]
wire _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:185:41]
wire [2:0] _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:286:41]
wire [12:0] _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:287:41]
wire _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:289:51]
wire _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:186:64]
wire [2:0] _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:290:41]
wire [2:0] _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:291:41]
wire _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:187:57]
wire [2:0] _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:298:41]
wire _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:188:43]
wire _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:189:40]
wire _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:190:66]
wire _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:310:41]
wire [1:0] _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:310:41]
wire _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:310:41]
wire [12:0] _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:310:41]
wire no_wait; // @[MSHR.scala:183:83]
wire [5:0] _probe_bit_uncommonBits_T = io_sinkc_bits_source_0; // @[Parameters.scala:52:29]
wire [9:0] io_status_bits_set_0; // @[MSHR.scala:84:7]
wire [12:0] io_status_bits_tag_0; // @[MSHR.scala:84:7]
wire [2:0] io_status_bits_way_0; // @[MSHR.scala:84:7]
wire io_status_bits_blockB_0; // @[MSHR.scala:84:7]
wire io_status_bits_nestB_0; // @[MSHR.scala:84:7]
wire io_status_bits_blockC_0; // @[MSHR.scala:84:7]
wire io_status_bits_nestC_0; // @[MSHR.scala:84:7]
wire io_status_valid_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_a_bits_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_a_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_a_bits_param_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_a_bits_block_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_a_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_b_bits_param_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_b_bits_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_b_bits_set_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_b_bits_clients_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_opcode_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_param_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_c_bits_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_c_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_c_bits_dirty_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_0_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_1_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_2_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_control_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_opcode_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_param_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_size_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_source_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_d_bits_tag_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_offset_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_put_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_d_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_bad_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_e_bits_sink_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_bits_data_dirty_0; // @[MSHR.scala:84:7]
wire [1:0] io_schedule_bits_dir_bits_data_state_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_bits_data_clients_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_dir_bits_data_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_dir_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_dir_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_reload_0; // @[MSHR.scala:84:7]
wire io_schedule_valid_0; // @[MSHR.scala:84:7]
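// Request and directory metadata captured at allocation time (MSHR.scala request/meta
// registers); most of the request_* fields feed straight through to the D-channel schedule
// bundle below.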
reg request_valid; // @[MSHR.scala:97:30]
assign io_status_valid_0 = request_valid; // @[MSHR.scala:84:7, :97:30]
reg request_prio_0; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_0_0 = request_prio_0; // @[MSHR.scala:84:7, :98:20]
reg request_prio_1; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_1_0 = request_prio_1; // @[MSHR.scala:84:7, :98:20]
reg request_prio_2; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_2_0 = request_prio_2; // @[MSHR.scala:84:7, :98:20]
reg request_control; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_control_0 = request_control; // @[MSHR.scala:84:7, :98:20]
reg [2:0] request_opcode; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_opcode_0 = request_opcode; // @[MSHR.scala:84:7, :98:20]
reg [2:0] request_param; // @[MSHR.scala:98:20]
reg [2:0] request_size; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_size_0 = request_size; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_source; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_source_0 = request_source; // @[MSHR.scala:84:7, :98:20]
wire [5:0] _req_clientBit_uncommonBits_T = request_source; // @[Parameters.scala:52:29]
reg [12:0] request_tag; // @[MSHR.scala:98:20]
assign io_status_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_a_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_d_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_offset; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_offset_0 = request_offset; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_put; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_put_0 = request_put; // @[MSHR.scala:84:7, :98:20]
reg [9:0] request_set; // @[MSHR.scala:98:20]
assign io_status_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_a_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_b_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_c_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_d_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_dir_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
reg meta_valid; // @[MSHR.scala:99:27]
reg meta_dirty; // @[MSHR.scala:100:17]
assign io_schedule_bits_c_bits_dirty_0 = meta_dirty; // @[MSHR.scala:84:7, :100:17]
reg [1:0] meta_state; // @[MSHR.scala:100:17]
reg meta_clients; // @[MSHR.scala:100:17]
wire _meta_no_clients_T = meta_clients; // @[MSHR.scala:100:17, :220:39]
wire evict_c = meta_clients; // @[MSHR.scala:100:17, :315:27]
wire before_c = meta_clients; // @[MSHR.scala:100:17, :315:27]
reg [12:0] meta_tag; // @[MSHR.scala:100:17]
assign io_schedule_bits_c_bits_tag_0 = meta_tag; // @[MSHR.scala:84:7, :100:17]
reg meta_hit; // @[MSHR.scala:100:17]
reg [2:0] meta_way; // @[MSHR.scala:100:17]
assign io_status_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_c_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_d_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_dir_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
wire [2:0] final_meta_writeback_way = meta_way; // @[MSHR.scala:100:17, :215:38]
reg s_rprobe; // @[MSHR.scala:121:33]
reg w_rprobeackfirst; // @[MSHR.scala:122:33]
reg w_rprobeacklast; // @[MSHR.scala:123:33]
reg s_release; // @[MSHR.scala:124:33]
reg w_releaseack; // @[MSHR.scala:125:33]
reg s_pprobe; // @[MSHR.scala:126:33]
reg s_acquire; // @[MSHR.scala:127:33]
reg s_flush; // @[MSHR.scala:128:33]
reg w_grantfirst; // @[MSHR.scala:129:33]
reg w_grantlast; // @[MSHR.scala:130:33]
reg w_grant; // @[MSHR.scala:131:33]
reg w_pprobeackfirst; // @[MSHR.scala:132:33]
reg w_pprobeacklast; // @[MSHR.scala:133:33]
reg w_pprobeack; // @[MSHR.scala:134:33]
reg s_grantack; // @[MSHR.scala:136:33]
reg s_execute; // @[MSHR.scala:137:33]
reg w_grantack; // @[MSHR.scala:138:33]
reg s_writeback; // @[MSHR.scala:139:33]
reg [2:0] sink; // @[MSHR.scala:147:17]
assign io_schedule_bits_e_bits_sink_0 = sink; // @[MSHR.scala:84:7, :147:17]
reg gotT; // @[MSHR.scala:148:17]
reg bad_grant; // @[MSHR.scala:149:22]
assign io_schedule_bits_d_bits_bad_0 = bad_grant; // @[MSHR.scala:84:7, :149:22]
reg probes_done; // @[MSHR.scala:150:24]
reg probes_toN; // @[MSHR.scala:151:23]
reg probes_noT; // @[MSHR.scala:152:23]
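// Status outputs: blockB/nestB/blockC/nestC summarise which nested B- and C-channel
// operations this MSHR can tolerate, derived from the s_*/w_* schedule flags above
// (MSHR.scala lines 168-173 in the source annotations).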
wire _io_status_bits_blockB_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28]
wire _io_status_bits_blockB_T_1 = ~w_releaseack; // @[MSHR.scala:125:33, :168:45]
wire _io_status_bits_blockB_T_2 = ~w_rprobeacklast; // @[MSHR.scala:123:33, :168:62]
wire _io_status_bits_blockB_T_3 = _io_status_bits_blockB_T_1 | _io_status_bits_blockB_T_2; // @[MSHR.scala:168:{45,59,62}]
wire _io_status_bits_blockB_T_4 = ~w_pprobeacklast; // @[MSHR.scala:133:33, :168:82]
wire _io_status_bits_blockB_T_5 = _io_status_bits_blockB_T_3 | _io_status_bits_blockB_T_4; // @[MSHR.scala:168:{59,79,82}]
wire _io_status_bits_blockB_T_6 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103]
wire _io_status_bits_blockB_T_7 = _io_status_bits_blockB_T_5 & _io_status_bits_blockB_T_6; // @[MSHR.scala:168:{79,100,103}]
assign _io_status_bits_blockB_T_8 = _io_status_bits_blockB_T | _io_status_bits_blockB_T_7; // @[MSHR.scala:168:{28,40,100}]
assign io_status_bits_blockB_0 = _io_status_bits_blockB_T_8; // @[MSHR.scala:84:7, :168:40]
wire _io_status_bits_nestB_T = meta_valid & w_releaseack; // @[MSHR.scala:99:27, :125:33, :169:39]
wire _io_status_bits_nestB_T_1 = _io_status_bits_nestB_T & w_rprobeacklast; // @[MSHR.scala:123:33, :169:{39,55}]
wire _io_status_bits_nestB_T_2 = _io_status_bits_nestB_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :169:{55,74}]
wire _io_status_bits_nestB_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :169:96]
assign _io_status_bits_nestB_T_4 = _io_status_bits_nestB_T_2 & _io_status_bits_nestB_T_3; // @[MSHR.scala:169:{74,93,96}]
assign io_status_bits_nestB_0 = _io_status_bits_nestB_T_4; // @[MSHR.scala:84:7, :169:93]
assign _io_status_bits_blockC_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28, :172:28]
assign io_status_bits_blockC_0 = _io_status_bits_blockC_T; // @[MSHR.scala:84:7, :172:28]
wire _io_status_bits_nestC_T = ~w_rprobeackfirst; // @[MSHR.scala:122:33, :173:43]
wire _io_status_bits_nestC_T_1 = ~w_pprobeackfirst; // @[MSHR.scala:132:33, :173:64]
wire _io_status_bits_nestC_T_2 = _io_status_bits_nestC_T | _io_status_bits_nestC_T_1; // @[MSHR.scala:173:{43,61,64}]
wire _io_status_bits_nestC_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :173:85]
wire _io_status_bits_nestC_T_4 = _io_status_bits_nestC_T_2 | _io_status_bits_nestC_T_3; // @[MSHR.scala:173:{61,82,85}]
assign _io_status_bits_nestC_T_5 = meta_valid & _io_status_bits_nestC_T_4; // @[MSHR.scala:99:27, :173:{39,82}]
assign io_status_bits_nestC_0 = _io_status_bits_nestC_T_5; // @[MSHR.scala:84:7, :173:39]
wire _no_wait_T = w_rprobeacklast & w_releaseack; // @[MSHR.scala:123:33, :125:33, :183:33]
wire _no_wait_T_1 = _no_wait_T & w_grantlast; // @[MSHR.scala:130:33, :183:{33,49}]
wire _no_wait_T_2 = _no_wait_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :183:{49,64}]
assign no_wait = _no_wait_T_2 & w_grantack; // @[MSHR.scala:138:33, :183:{64,83}]
assign io_schedule_bits_reload_0 = no_wait; // @[MSHR.scala:84:7, :183:83]
wire _io_schedule_bits_a_valid_T = ~s_acquire; // @[MSHR.scala:127:33, :184:31]
wire _io_schedule_bits_a_valid_T_1 = _io_schedule_bits_a_valid_T & s_release; // @[MSHR.scala:124:33, :184:{31,42}]
assign _io_schedule_bits_a_valid_T_2 = _io_schedule_bits_a_valid_T_1 & s_pprobe; // @[MSHR.scala:126:33, :184:{42,55}]
assign io_schedule_bits_a_valid_0 = _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:84:7, :184:55]
wire _io_schedule_bits_b_valid_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31]
wire _io_schedule_bits_b_valid_T_1 = ~s_pprobe; // @[MSHR.scala:126:33, :185:44]
assign _io_schedule_bits_b_valid_T_2 = _io_schedule_bits_b_valid_T | _io_schedule_bits_b_valid_T_1; // @[MSHR.scala:185:{31,41,44}]
assign io_schedule_bits_b_valid_0 = _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:84:7, :185:41]
wire _io_schedule_bits_c_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32]
wire _io_schedule_bits_c_valid_T_1 = _io_schedule_bits_c_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :186:{32,43}]
assign _io_schedule_bits_c_valid_T_4 = _io_schedule_bits_c_valid_T_1; // @[MSHR.scala:186:{43,64}]
assign io_schedule_bits_c_valid_0 = _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:84:7, :186:64]
wire _io_schedule_bits_d_valid_T = ~s_execute; // @[MSHR.scala:137:33, :187:31]
wire _io_schedule_bits_d_valid_T_1 = _io_schedule_bits_d_valid_T & w_pprobeack; // @[MSHR.scala:134:33, :187:{31,42}]
assign _io_schedule_bits_d_valid_T_2 = _io_schedule_bits_d_valid_T_1 & w_grant; // @[MSHR.scala:131:33, :187:{42,57}]
assign io_schedule_bits_d_valid_0 = _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:84:7, :187:57]
wire _io_schedule_bits_e_valid_T = ~s_grantack; // @[MSHR.scala:136:33, :188:31]
assign _io_schedule_bits_e_valid_T_1 = _io_schedule_bits_e_valid_T & w_grantfirst; // @[MSHR.scala:129:33, :188:{31,43}]
assign io_schedule_bits_e_valid_0 = _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:84:7, :188:43]
wire _io_schedule_bits_x_valid_T = ~s_flush; // @[MSHR.scala:128:33, :189:31]
assign _io_schedule_bits_x_valid_T_1 = _io_schedule_bits_x_valid_T & w_releaseack; // @[MSHR.scala:125:33, :189:{31,40}]
assign io_schedule_bits_x_valid_0 = _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:84:7, :189:40]
wire _io_schedule_bits_dir_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :190:34]
wire _io_schedule_bits_dir_valid_T_1 = _io_schedule_bits_dir_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :190:{34,45}]
wire _io_schedule_bits_dir_valid_T_2 = ~s_writeback; // @[MSHR.scala:139:33, :190:70]
wire _io_schedule_bits_dir_valid_T_3 = _io_schedule_bits_dir_valid_T_2 & no_wait; // @[MSHR.scala:183:83, :190:{70,83}]
assign _io_schedule_bits_dir_valid_T_4 = _io_schedule_bits_dir_valid_T_1 | _io_schedule_bits_dir_valid_T_3; // @[MSHR.scala:190:{45,66,83}]
assign io_schedule_bits_dir_valid_0 = _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:84:7, :190:66]
wire _io_schedule_valid_T = io_schedule_bits_a_valid_0 | io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7, :192:49]
wire _io_schedule_valid_T_1 = _io_schedule_valid_T | io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7, :192:{49,77}]
wire _io_schedule_valid_T_2 = _io_schedule_valid_T_1 | io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7, :192:{77,105}]
wire _io_schedule_valid_T_3 = _io_schedule_valid_T_2 | io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7, :192:105, :193:49]
wire _io_schedule_valid_T_4 = _io_schedule_valid_T_3 | io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7, :193:{49,77}]
assign _io_schedule_valid_T_5 = _io_schedule_valid_T_4 | io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7, :193:{77,105}]
assign io_schedule_valid_0 = _io_schedule_valid_T_5; // @[MSHR.scala:84:7, :193:105]
wire _io_schedule_bits_dir_bits_data_WIRE_dirty = final_meta_writeback_dirty; // @[MSHR.scala:215:38, :310:71]
wire [1:0] _io_schedule_bits_dir_bits_data_WIRE_state = final_meta_writeback_state; // @[MSHR.scala:215:38, :310:71]
wire _io_schedule_bits_dir_bits_data_WIRE_clients = final_meta_writeback_clients; // @[MSHR.scala:215:38, :310:71]
wire after_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27]
wire prior_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27]
wire [12:0] _io_schedule_bits_dir_bits_data_WIRE_tag = final_meta_writeback_tag; // @[MSHR.scala:215:38, :310:71]
wire final_meta_writeback_hit; // @[MSHR.scala:215:38]
wire [2:0] req_clientBit_uncommonBits = _req_clientBit_uncommonBits_T[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] _req_clientBit_T = request_source[5:3]; // @[Parameters.scala:54:10]
wire _req_clientBit_T_1 = _req_clientBit_T == 3'h4; // @[Parameters.scala:54:{10,32}]
wire _req_clientBit_T_3 = _req_clientBit_T_1; // @[Parameters.scala:54:{32,67}]
wire _req_clientBit_T_4 = req_clientBit_uncommonBits < 3'h5; // @[Parameters.scala:52:56, :57:20]
wire req_clientBit = _req_clientBit_T_3 & _req_clientBit_T_4; // @[Parameters.scala:54:67, :56:48, :57:20]
wire _req_needT_T = request_opcode[2]; // @[Parameters.scala:269:12]
wire _final_meta_writeback_dirty_T_3 = request_opcode[2]; // @[Parameters.scala:269:12]
wire _req_needT_T_1 = ~_req_needT_T; // @[Parameters.scala:269:{5,12}]
wire _GEN = request_opcode == 3'h5; // @[Parameters.scala:270:13]
wire _req_needT_T_2; // @[Parameters.scala:270:13]
assign _req_needT_T_2 = _GEN; // @[Parameters.scala:270:13]
wire _excluded_client_T_6; // @[Parameters.scala:279:117]
assign _excluded_client_T_6 = _GEN; // @[Parameters.scala:270:13, :279:117]
wire _GEN_0 = request_param == 3'h1; // @[Parameters.scala:270:42]
wire _req_needT_T_3; // @[Parameters.scala:270:42]
assign _req_needT_T_3 = _GEN_0; // @[Parameters.scala:270:42]
wire _final_meta_writeback_clients_T; // @[Parameters.scala:282:11]
assign _final_meta_writeback_clients_T = _GEN_0; // @[Parameters.scala:270:42, :282:11]
wire _io_schedule_bits_d_bits_param_T_7; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_7 = _GEN_0; // @[Parameters.scala:270:42]
wire _req_needT_T_4 = _req_needT_T_2 & _req_needT_T_3; // @[Parameters.scala:270:{13,33,42}]
wire _req_needT_T_5 = _req_needT_T_1 | _req_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33]
wire _GEN_1 = request_opcode == 3'h6; // @[Parameters.scala:271:14]
wire _req_needT_T_6; // @[Parameters.scala:271:14]
assign _req_needT_T_6 = _GEN_1; // @[Parameters.scala:271:14]
wire _req_acquire_T; // @[MSHR.scala:219:36]
assign _req_acquire_T = _GEN_1; // @[Parameters.scala:271:14]
wire _excluded_client_T_1; // @[Parameters.scala:279:12]
assign _excluded_client_T_1 = _GEN_1; // @[Parameters.scala:271:14, :279:12]
wire _req_needT_T_7 = &request_opcode; // @[Parameters.scala:271:52]
wire _req_needT_T_8 = _req_needT_T_6 | _req_needT_T_7; // @[Parameters.scala:271:{14,42,52}]
wire _req_needT_T_9 = |request_param; // @[Parameters.scala:271:89]
wire _req_needT_T_10 = _req_needT_T_8 & _req_needT_T_9; // @[Parameters.scala:271:{42,80,89}]
wire req_needT = _req_needT_T_5 | _req_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80]
wire _req_acquire_T_1 = &request_opcode; // @[Parameters.scala:271:52]
wire req_acquire = _req_acquire_T | _req_acquire_T_1; // @[MSHR.scala:219:{36,53,71}]
wire meta_no_clients = ~_meta_no_clients_T; // @[MSHR.scala:220:{25,39}]
wire _req_promoteT_T = &meta_state; // @[MSHR.scala:100:17, :221:81]
wire _req_promoteT_T_1 = meta_no_clients & _req_promoteT_T; // @[MSHR.scala:220:25, :221:{67,81}]
wire _req_promoteT_T_2 = meta_hit ? _req_promoteT_T_1 : gotT; // @[MSHR.scala:100:17, :148:17, :221:{40,67}]
wire req_promoteT = req_acquire & _req_promoteT_T_2; // @[MSHR.scala:219:53, :221:{34,40}]
wire _final_meta_writeback_dirty_T = request_opcode[0]; // @[MSHR.scala:98:20, :224:65]
wire _final_meta_writeback_dirty_T_1 = meta_dirty | _final_meta_writeback_dirty_T; // @[MSHR.scala:100:17, :224:{48,65}]
wire _final_meta_writeback_state_T = request_param != 3'h3; // @[MSHR.scala:98:20, :225:55]
wire _GEN_2 = meta_state == 2'h2; // @[MSHR.scala:100:17, :225:78]
wire _final_meta_writeback_state_T_1; // @[MSHR.scala:225:78]
assign _final_meta_writeback_state_T_1 = _GEN_2; // @[MSHR.scala:225:78]
wire _final_meta_writeback_state_T_12; // @[MSHR.scala:240:70]
assign _final_meta_writeback_state_T_12 = _GEN_2; // @[MSHR.scala:225:78, :240:70]
wire _evict_T_2; // @[MSHR.scala:317:26]
assign _evict_T_2 = _GEN_2; // @[MSHR.scala:225:78, :317:26]
wire _before_T_1; // @[MSHR.scala:317:26]
assign _before_T_1 = _GEN_2; // @[MSHR.scala:225:78, :317:26]
wire _final_meta_writeback_state_T_2 = _final_meta_writeback_state_T & _final_meta_writeback_state_T_1; // @[MSHR.scala:225:{55,64,78}]
wire [1:0] _final_meta_writeback_state_T_3 = _final_meta_writeback_state_T_2 ? 2'h3 : meta_state; // @[MSHR.scala:100:17, :225:{40,64}]
wire _GEN_3 = request_param == 3'h2; // @[Parameters.scala:282:43]
wire _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:43]
assign _final_meta_writeback_clients_T_1 = _GEN_3; // @[Parameters.scala:282:43]
wire _io_schedule_bits_d_bits_param_T_5; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_5 = _GEN_3; // @[Parameters.scala:282:43]
wire _final_meta_writeback_clients_T_2 = _final_meta_writeback_clients_T | _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:{11,34,43}]
wire _final_meta_writeback_clients_T_3 = request_param == 3'h5; // @[Parameters.scala:282:75]
wire _final_meta_writeback_clients_T_4 = _final_meta_writeback_clients_T_2 | _final_meta_writeback_clients_T_3; // @[Parameters.scala:282:{34,66,75}]
wire _final_meta_writeback_clients_T_5 = _final_meta_writeback_clients_T_4 & req_clientBit; // @[Parameters.scala:56:48]
wire _final_meta_writeback_clients_T_6 = ~_final_meta_writeback_clients_T_5; // @[MSHR.scala:226:{52,56}]
wire _final_meta_writeback_clients_T_7 = meta_clients & _final_meta_writeback_clients_T_6; // @[MSHR.scala:100:17, :226:{50,52}]
wire _final_meta_writeback_clients_T_8 = ~probes_toN; // @[MSHR.scala:151:23, :232:54]
wire _final_meta_writeback_clients_T_9 = meta_clients & _final_meta_writeback_clients_T_8; // @[MSHR.scala:100:17, :232:{52,54}]
wire _final_meta_writeback_dirty_T_2 = meta_hit & meta_dirty; // @[MSHR.scala:100:17, :236:45]
wire _final_meta_writeback_dirty_T_4 = ~_final_meta_writeback_dirty_T_3; // @[MSHR.scala:236:{63,78}]
wire _final_meta_writeback_dirty_T_5 = _final_meta_writeback_dirty_T_2 | _final_meta_writeback_dirty_T_4; // @[MSHR.scala:236:{45,60,63}]
wire [1:0] _GEN_4 = {1'h1, ~req_acquire}; // @[MSHR.scala:219:53, :238:40]
wire [1:0] _final_meta_writeback_state_T_4; // @[MSHR.scala:238:40]
assign _final_meta_writeback_state_T_4 = _GEN_4; // @[MSHR.scala:238:40]
wire [1:0] _final_meta_writeback_state_T_6; // @[MSHR.scala:239:65]
assign _final_meta_writeback_state_T_6 = _GEN_4; // @[MSHR.scala:238:40, :239:65]
wire _final_meta_writeback_state_T_5 = ~meta_hit; // @[MSHR.scala:100:17, :239:41]
wire [1:0] _final_meta_writeback_state_T_7 = gotT ? _final_meta_writeback_state_T_6 : 2'h1; // @[MSHR.scala:148:17, :239:{55,65}]
wire _final_meta_writeback_state_T_8 = meta_no_clients & req_acquire; // @[MSHR.scala:219:53, :220:25, :244:72]
wire [1:0] _final_meta_writeback_state_T_9 = {1'h1, ~_final_meta_writeback_state_T_8}; // @[MSHR.scala:244:{55,72}]
wire _GEN_5 = meta_state == 2'h1; // @[MSHR.scala:100:17, :240:70]
wire _final_meta_writeback_state_T_10; // @[MSHR.scala:240:70]
assign _final_meta_writeback_state_T_10 = _GEN_5; // @[MSHR.scala:240:70]
wire _io_schedule_bits_c_bits_param_T; // @[MSHR.scala:291:53]
assign _io_schedule_bits_c_bits_param_T = _GEN_5; // @[MSHR.scala:240:70, :291:53]
wire _evict_T_1; // @[MSHR.scala:317:26]
assign _evict_T_1 = _GEN_5; // @[MSHR.scala:240:70, :317:26]
wire _before_T; // @[MSHR.scala:317:26]
assign _before_T = _GEN_5; // @[MSHR.scala:240:70, :317:26]
wire [1:0] _final_meta_writeback_state_T_13 = {_final_meta_writeback_state_T_12, 1'h1}; // @[MSHR.scala:240:70]
wire _final_meta_writeback_state_T_14 = &meta_state; // @[MSHR.scala:100:17, :221:81, :240:70]
wire [1:0] _final_meta_writeback_state_T_15 = _final_meta_writeback_state_T_14 ? _final_meta_writeback_state_T_9 : _final_meta_writeback_state_T_13; // @[MSHR.scala:240:70, :244:55]
wire [1:0] _final_meta_writeback_state_T_16 = _final_meta_writeback_state_T_5 ? _final_meta_writeback_state_T_7 : _final_meta_writeback_state_T_15; // @[MSHR.scala:239:{40,41,55}, :240:70]
wire [1:0] _final_meta_writeback_state_T_17 = req_needT ? _final_meta_writeback_state_T_4 : _final_meta_writeback_state_T_16; // @[Parameters.scala:270:70]
wire _final_meta_writeback_clients_T_10 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :245:66]
wire _final_meta_writeback_clients_T_11 = meta_clients & _final_meta_writeback_clients_T_10; // @[MSHR.scala:100:17, :245:{64,66}]
wire _final_meta_writeback_clients_T_12 = meta_hit & _final_meta_writeback_clients_T_11; // @[MSHR.scala:100:17, :245:{40,64}]
wire _final_meta_writeback_clients_T_13 = req_acquire & req_clientBit; // @[Parameters.scala:56:48]
wire _final_meta_writeback_clients_T_14 = _final_meta_writeback_clients_T_12 | _final_meta_writeback_clients_T_13; // @[MSHR.scala:245:{40,84}, :246:40]
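// final_meta_writeback_*: the directory entry to be written back once the plan completes,
// selected by request priority (C-channel release, control/flush, or A-channel acquire)
// and overridden when the grant was denied (bad_grant).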
assign final_meta_writeback_tag = request_prio_2 | request_control ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :215:38, :223:52, :228:53, :247:30]
wire _final_meta_writeback_clients_T_15 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :258:54]
wire _final_meta_writeback_clients_T_16 = meta_clients & _final_meta_writeback_clients_T_15; // @[MSHR.scala:100:17, :258:{52,54}]
assign final_meta_writeback_hit = bad_grant ? meta_hit : request_prio_2 | ~request_control; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :227:34, :228:53, :234:30, :248:30, :251:20, :252:21]
assign final_meta_writeback_dirty = ~bad_grant & (request_prio_2 ? _final_meta_writeback_dirty_T_1 : request_control ? ~meta_hit & meta_dirty : _final_meta_writeback_dirty_T_5); // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :224:{34,48}, :228:53, :229:21, :230:36, :236:{32,60}, :251:20, :252:21]
assign final_meta_writeback_state = bad_grant ? {1'h0, meta_hit} : request_prio_2 ? _final_meta_writeback_state_T_3 : request_control ? (meta_hit ? 2'h0 : meta_state) : _final_meta_writeback_state_T_17; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :225:{34,40}, :228:53, :229:21, :231:36, :237:{32,38}, :251:20, :252:21, :257:36, :263:36]
assign final_meta_writeback_clients = bad_grant ? meta_hit & _final_meta_writeback_clients_T_16 : request_prio_2 ? _final_meta_writeback_clients_T_7 : request_control ? (meta_hit ? _final_meta_writeback_clients_T_9 : meta_clients) : _final_meta_writeback_clients_T_14; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :226:{34,50}, :228:53, :229:21, :232:{36,52}, :245:{34,84}, :251:20, :252:21, :258:{36,52}, :264:36]
wire _honour_BtoT_T = meta_clients & req_clientBit; // @[Parameters.scala:56:48]
wire _honour_BtoT_T_1 = _honour_BtoT_T; // @[MSHR.scala:276:{47,64}]
wire honour_BtoT = meta_hit & _honour_BtoT_T_1; // @[MSHR.scala:100:17, :276:{30,64}]
wire _excluded_client_T = meta_hit & request_prio_0; // @[MSHR.scala:98:20, :100:17, :279:38]
wire _excluded_client_T_2 = &request_opcode; // @[Parameters.scala:271:52, :279:50]
wire _excluded_client_T_3 = _excluded_client_T_1 | _excluded_client_T_2; // @[Parameters.scala:279:{12,40,50}]
wire _excluded_client_T_4 = request_opcode == 3'h4; // @[Parameters.scala:279:87]
wire _excluded_client_T_5 = _excluded_client_T_3 | _excluded_client_T_4; // @[Parameters.scala:279:{40,77,87}]
wire _excluded_client_T_8 = _excluded_client_T_5; // @[Parameters.scala:279:{77,106}]
wire _excluded_client_T_9 = _excluded_client_T & _excluded_client_T_8; // @[Parameters.scala:279:106]
wire excluded_client = _excluded_client_T_9 & req_clientBit; // @[Parameters.scala:56:48]
wire [1:0] _io_schedule_bits_a_bits_param_T = meta_hit ? 2'h2 : 2'h1; // @[MSHR.scala:100:17, :282:56]
wire [1:0] _io_schedule_bits_a_bits_param_T_1 = req_needT ? _io_schedule_bits_a_bits_param_T : 2'h0; // @[Parameters.scala:270:70]
assign io_schedule_bits_a_bits_param_0 = {1'h0, _io_schedule_bits_a_bits_param_T_1}; // @[MSHR.scala:84:7, :282:{35,41}]
wire _io_schedule_bits_a_bits_block_T = request_size != 3'h6; // @[MSHR.scala:98:20, :283:51]
wire _io_schedule_bits_a_bits_block_T_1 = request_opcode == 3'h0; // @[MSHR.scala:98:20, :284:55]
wire _io_schedule_bits_a_bits_block_T_2 = &request_opcode; // @[Parameters.scala:271:52]
wire _io_schedule_bits_a_bits_block_T_3 = _io_schedule_bits_a_bits_block_T_1 | _io_schedule_bits_a_bits_block_T_2; // @[MSHR.scala:284:{55,71,89}]
wire _io_schedule_bits_a_bits_block_T_4 = ~_io_schedule_bits_a_bits_block_T_3; // @[MSHR.scala:284:{38,71}]
assign _io_schedule_bits_a_bits_block_T_5 = _io_schedule_bits_a_bits_block_T | _io_schedule_bits_a_bits_block_T_4; // @[MSHR.scala:283:{51,91}, :284:38]
assign io_schedule_bits_a_bits_block_0 = _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:84:7, :283:91]
wire _io_schedule_bits_b_bits_param_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :286:42]
wire [1:0] _io_schedule_bits_b_bits_param_T_1 = req_needT ? 2'h2 : 2'h1; // @[Parameters.scala:270:70]
wire [2:0] _io_schedule_bits_b_bits_param_T_2 = request_prio_1 ? request_param : {1'h0, _io_schedule_bits_b_bits_param_T_1}; // @[MSHR.scala:98:20, :286:{61,97}]
assign _io_schedule_bits_b_bits_param_T_3 = _io_schedule_bits_b_bits_param_T ? 3'h2 : _io_schedule_bits_b_bits_param_T_2; // @[MSHR.scala:286:{41,42,61}]
assign io_schedule_bits_b_bits_param_0 = _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:84:7, :286:41]
wire _io_schedule_bits_b_bits_tag_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :287:42]
assign _io_schedule_bits_b_bits_tag_T_1 = _io_schedule_bits_b_bits_tag_T ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :287:{41,42}]
assign io_schedule_bits_b_bits_tag_0 = _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:84:7, :287:41]
wire _io_schedule_bits_b_bits_clients_T = ~excluded_client; // @[MSHR.scala:279:28, :289:53]
assign _io_schedule_bits_b_bits_clients_T_1 = meta_clients & _io_schedule_bits_b_bits_clients_T; // @[MSHR.scala:100:17, :289:{51,53}]
assign io_schedule_bits_b_bits_clients_0 = _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:84:7, :289:51]
assign _io_schedule_bits_c_bits_opcode_T = {2'h3, meta_dirty}; // @[MSHR.scala:100:17, :290:41]
assign io_schedule_bits_c_bits_opcode_0 = _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:84:7, :290:41]
assign _io_schedule_bits_c_bits_param_T_1 = _io_schedule_bits_c_bits_param_T ? 3'h2 : 3'h1; // @[MSHR.scala:291:{41,53}]
assign io_schedule_bits_c_bits_param_0 = _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:84:7, :291:41]
wire _io_schedule_bits_d_bits_param_T = ~req_acquire; // @[MSHR.scala:219:53, :298:42]
wire [1:0] _io_schedule_bits_d_bits_param_T_1 = {1'h0, req_promoteT}; // @[MSHR.scala:221:34, :300:53]
wire [1:0] _io_schedule_bits_d_bits_param_T_2 = honour_BtoT ? 2'h2 : 2'h1; // @[MSHR.scala:276:30, :301:53]
wire _io_schedule_bits_d_bits_param_T_3 = ~(|request_param); // @[Parameters.scala:271:89]
wire [2:0] _io_schedule_bits_d_bits_param_T_4 = _io_schedule_bits_d_bits_param_T_3 ? {1'h0, _io_schedule_bits_d_bits_param_T_1} : request_param; // @[MSHR.scala:98:20, :299:79, :300:53]
wire [2:0] _io_schedule_bits_d_bits_param_T_6 = _io_schedule_bits_d_bits_param_T_5 ? {1'h0, _io_schedule_bits_d_bits_param_T_2} : _io_schedule_bits_d_bits_param_T_4; // @[MSHR.scala:299:79, :301:53]
wire [2:0] _io_schedule_bits_d_bits_param_T_8 = _io_schedule_bits_d_bits_param_T_7 ? 3'h1 : _io_schedule_bits_d_bits_param_T_6; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_9 = _io_schedule_bits_d_bits_param_T ? request_param : _io_schedule_bits_d_bits_param_T_8; // @[MSHR.scala:98:20, :298:{41,42}, :299:79]
assign io_schedule_bits_d_bits_param_0 = _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:84:7, :298:41]
wire _io_schedule_bits_dir_bits_data_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :310:42]
assign _io_schedule_bits_dir_bits_data_T_1_dirty = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_dirty; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_state = _io_schedule_bits_dir_bits_data_T ? 2'h0 : _io_schedule_bits_dir_bits_data_WIRE_state; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_clients = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_clients; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_tag = _io_schedule_bits_dir_bits_data_T ? 13'h0 : _io_schedule_bits_dir_bits_data_WIRE_tag; // @[MSHR.scala:310:{41,42,71}]
assign io_schedule_bits_dir_bits_data_dirty_0 = _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_state_0 = _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_clients_0 = _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_tag_0 = _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:84:7, :310:41]
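// evict/before/after: 4-bit summaries of the directory state (built from state, dirty and
// clients) before and after this transaction, as computed around MSHR.scala lines 314-323.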
wire _evict_T = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :338:32]
wire [3:0] evict; // @[MSHR.scala:314:26]
wire _evict_out_T = ~evict_c; // @[MSHR.scala:315:27, :318:32]
wire [1:0] _GEN_6 = {1'h1, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32]
wire [1:0] _evict_out_T_1; // @[MSHR.scala:319:32]
assign _evict_out_T_1 = _GEN_6; // @[MSHR.scala:319:32]
wire [1:0] _before_out_T_1; // @[MSHR.scala:319:32]
assign _before_out_T_1 = _GEN_6; // @[MSHR.scala:319:32]
wire _evict_T_3 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26]
wire [2:0] _GEN_7 = {2'h2, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:39]
wire [2:0] _evict_out_T_2; // @[MSHR.scala:320:39]
assign _evict_out_T_2 = _GEN_7; // @[MSHR.scala:320:39]
wire [2:0] _before_out_T_2; // @[MSHR.scala:320:39]
assign _before_out_T_2 = _GEN_7; // @[MSHR.scala:320:39]
wire [2:0] _GEN_8 = {2'h3, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:76]
wire [2:0] _evict_out_T_3; // @[MSHR.scala:320:76]
assign _evict_out_T_3 = _GEN_8; // @[MSHR.scala:320:76]
wire [2:0] _before_out_T_3; // @[MSHR.scala:320:76]
assign _before_out_T_3 = _GEN_8; // @[MSHR.scala:320:76]
wire [2:0] _evict_out_T_4 = evict_c ? _evict_out_T_2 : _evict_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _evict_T_4 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26]
wire _evict_T_5 = ~_evict_T; // @[MSHR.scala:323:11, :338:32]
assign evict = _evict_T_5 ? 4'h8 : _evict_T_1 ? {3'h0, _evict_out_T} : _evict_T_2 ? {2'h0, _evict_out_T_1} : _evict_T_3 ? {1'h0, _evict_out_T_4} : {_evict_T_4, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}]
wire [3:0] before_0; // @[MSHR.scala:314:26]
wire _before_out_T = ~before_c; // @[MSHR.scala:315:27, :318:32]
wire _before_T_2 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26]
wire [2:0] _before_out_T_4 = before_c ? _before_out_T_2 : _before_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _before_T_3 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26]
wire _before_T_4 = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :323:11]
assign before_0 = _before_T_4 ? 4'h8 : _before_T ? {3'h0, _before_out_T} : _before_T_1 ? {2'h0, _before_out_T_1} : _before_T_2 ? {1'h0, _before_out_T_4} : {_before_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}]
wire [3:0] after; // @[MSHR.scala:314:26]
wire _GEN_9 = final_meta_writeback_state == 2'h1; // @[MSHR.scala:215:38, :317:26]
wire _after_T; // @[MSHR.scala:317:26]
assign _after_T = _GEN_9; // @[MSHR.scala:317:26]
wire _prior_T; // @[MSHR.scala:317:26]
assign _prior_T = _GEN_9; // @[MSHR.scala:317:26]
wire _after_out_T = ~after_c; // @[MSHR.scala:315:27, :318:32]
wire _GEN_10 = final_meta_writeback_state == 2'h2; // @[MSHR.scala:215:38, :317:26]
wire _after_T_1; // @[MSHR.scala:317:26]
assign _after_T_1 = _GEN_10; // @[MSHR.scala:317:26]
wire _prior_T_1; // @[MSHR.scala:317:26]
assign _prior_T_1 = _GEN_10; // @[MSHR.scala:317:26]
wire [1:0] _GEN_11 = {1'h1, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32]
wire [1:0] _after_out_T_1; // @[MSHR.scala:319:32]
assign _after_out_T_1 = _GEN_11; // @[MSHR.scala:319:32]
wire [1:0] _prior_out_T_1; // @[MSHR.scala:319:32]
assign _prior_out_T_1 = _GEN_11; // @[MSHR.scala:319:32]
wire _after_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26]
wire [2:0] _GEN_12 = {2'h2, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:39]
wire [2:0] _after_out_T_2; // @[MSHR.scala:320:39]
assign _after_out_T_2 = _GEN_12; // @[MSHR.scala:320:39]
wire [2:0] _prior_out_T_2; // @[MSHR.scala:320:39]
assign _prior_out_T_2 = _GEN_12; // @[MSHR.scala:320:39]
wire [2:0] _GEN_13 = {2'h3, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:76]
wire [2:0] _after_out_T_3; // @[MSHR.scala:320:76]
assign _after_out_T_3 = _GEN_13; // @[MSHR.scala:320:76]
wire [2:0] _prior_out_T_3; // @[MSHR.scala:320:76]
assign _prior_out_T_3 = _GEN_13; // @[MSHR.scala:320:76]
wire [2:0] _after_out_T_4 = after_c ? _after_out_T_2 : _after_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _GEN_14 = final_meta_writeback_state == 2'h0; // @[MSHR.scala:215:38, :317:26]
wire _after_T_3; // @[MSHR.scala:317:26]
assign _after_T_3 = _GEN_14; // @[MSHR.scala:317:26]
wire _prior_T_3; // @[MSHR.scala:317:26]
assign _prior_T_3 = _GEN_14; // @[MSHR.scala:317:26]
assign after = _after_T ? {3'h0, _after_out_T} : _after_T_1 ? {2'h0, _after_out_T_1} : _after_T_2 ? {1'h0, _after_out_T_4} : {_after_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26]
wire [2:0] probe_bit_uncommonBits = _probe_bit_uncommonBits_T[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] _probe_bit_T = io_sinkc_bits_source_0[5:3]; // @[Parameters.scala:54:10]
wire _probe_bit_T_1 = _probe_bit_T == 3'h4; // @[Parameters.scala:54:{10,32}]
wire _probe_bit_T_3 = _probe_bit_T_1; // @[Parameters.scala:54:{32,67}]
wire _probe_bit_T_4 = probe_bit_uncommonBits < 3'h5; // @[Parameters.scala:52:56, :57:20]
wire probe_bit = _probe_bit_T_3 & _probe_bit_T_4; // @[Parameters.scala:54:67, :56:48, :57:20]
wire _GEN_15 = probes_done | probe_bit; // @[Parameters.scala:56:48]
wire _last_probe_T; // @[MSHR.scala:459:33]
assign _last_probe_T = _GEN_15; // @[MSHR.scala:459:33]
wire _probes_done_T; // @[MSHR.scala:467:32]
assign _probes_done_T = _GEN_15; // @[MSHR.scala:459:33, :467:32]
wire _last_probe_T_1 = ~excluded_client; // @[MSHR.scala:279:28, :289:53, :459:66]
wire _last_probe_T_2 = meta_clients & _last_probe_T_1; // @[MSHR.scala:100:17, :459:{64,66}]
wire last_probe = _last_probe_T == _last_probe_T_2; // @[MSHR.scala:459:{33,46,64}]
wire _probe_toN_T = io_sinkc_bits_param_0 == 3'h1; // @[Parameters.scala:282:11]
wire _probe_toN_T_1 = io_sinkc_bits_param_0 == 3'h2; // @[Parameters.scala:282:43]
wire _probe_toN_T_2 = _probe_toN_T | _probe_toN_T_1; // @[Parameters.scala:282:{11,34,43}]
wire _probe_toN_T_3 = io_sinkc_bits_param_0 == 3'h5; // @[Parameters.scala:282:75]
wire probe_toN = _probe_toN_T_2 | _probe_toN_T_3; // @[Parameters.scala:282:{34,66,75}]
wire _probes_toN_T = probe_toN & probe_bit; // @[Parameters.scala:56:48]
wire _probes_toN_T_1 = probes_toN | _probes_toN_T; // @[MSHR.scala:151:23, :468:{30,35}]
wire _probes_noT_T = io_sinkc_bits_param_0 != 3'h3; // @[MSHR.scala:84:7, :469:53]
wire _probes_noT_T_1 = probes_noT | _probes_noT_T; // @[MSHR.scala:152:23, :469:{30,53}]
wire _w_rprobeackfirst_T = w_rprobeackfirst | last_probe; // @[MSHR.scala:122:33, :459:46, :470:42]
wire _GEN_16 = last_probe & io_sinkc_bits_last_0; // @[MSHR.scala:84:7, :459:46, :471:55]
wire _w_rprobeacklast_T; // @[MSHR.scala:471:55]
assign _w_rprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55]
wire _w_pprobeacklast_T; // @[MSHR.scala:473:55]
assign _w_pprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55, :473:55]
wire _w_rprobeacklast_T_1 = w_rprobeacklast | _w_rprobeacklast_T; // @[MSHR.scala:123:33, :471:{40,55}]
wire _w_pprobeackfirst_T = w_pprobeackfirst | last_probe; // @[MSHR.scala:132:33, :459:46, :472:42]
wire _w_pprobeacklast_T_1 = w_pprobeacklast | _w_pprobeacklast_T; // @[MSHR.scala:133:33, :473:{40,55}]
wire _set_pprobeack_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77]
wire _set_pprobeack_T_1 = io_sinkc_bits_last_0 | _set_pprobeack_T; // @[MSHR.scala:84:7, :475:{59,77}]
wire set_pprobeack = last_probe & _set_pprobeack_T_1; // @[MSHR.scala:459:46, :475:{36,59}]
wire _w_pprobeack_T = w_pprobeack | set_pprobeack; // @[MSHR.scala:134:33, :475:36, :476:32]
wire _w_grant_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77, :490:33]
wire _w_grant_T_1 = _w_grant_T | io_sinkd_bits_last_0; // @[MSHR.scala:84:7, :490:{33,41}]
wire _gotT_T = io_sinkd_bits_param_0 == 3'h0; // @[MSHR.scala:84:7, :493:35]
wire _new_meta_T = io_allocate_valid_0 & io_allocate_bits_repeat_0; // @[MSHR.scala:84:7, :505:40]
wire new_meta_dirty = _new_meta_T ? final_meta_writeback_dirty : io_directory_bits_dirty_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [1:0] new_meta_state = _new_meta_T ? final_meta_writeback_state : io_directory_bits_state_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_meta_clients = _new_meta_T ? final_meta_writeback_clients : io_directory_bits_clients_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [12:0] new_meta_tag = _new_meta_T ? final_meta_writeback_tag : io_directory_bits_tag_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_meta_hit = _new_meta_T ? final_meta_writeback_hit : io_directory_bits_hit_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [2:0] new_meta_way = _new_meta_T ? final_meta_writeback_way : io_directory_bits_way_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_request_prio_0 = io_allocate_valid_0 ? allocate_as_full_prio_0 : request_prio_0; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_prio_1 = io_allocate_valid_0 ? allocate_as_full_prio_1 : request_prio_1; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_prio_2 = io_allocate_valid_0 ? allocate_as_full_prio_2 : request_prio_2; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_control = io_allocate_valid_0 ? allocate_as_full_control : request_control; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_opcode = io_allocate_valid_0 ? allocate_as_full_opcode : request_opcode; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_param = io_allocate_valid_0 ? allocate_as_full_param : request_param; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_size = io_allocate_valid_0 ? allocate_as_full_size : request_size; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_source = io_allocate_valid_0 ? allocate_as_full_source : request_source; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [12:0] new_request_tag = io_allocate_valid_0 ? allocate_as_full_tag : request_tag; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_offset = io_allocate_valid_0 ? allocate_as_full_offset : request_offset; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_put = io_allocate_valid_0 ? allocate_as_full_put : request_put; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [9:0] new_request_set = io_allocate_valid_0 ? allocate_as_full_set : request_set; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] _new_clientBit_uncommonBits_T = new_request_source; // @[Parameters.scala:52:29]
wire _new_needT_T = new_request_opcode[2]; // @[Parameters.scala:269:12]
wire _new_needT_T_1 = ~_new_needT_T; // @[Parameters.scala:269:{5,12}]
wire _GEN_17 = new_request_opcode == 3'h5; // @[Parameters.scala:270:13]
wire _new_needT_T_2; // @[Parameters.scala:270:13]
assign _new_needT_T_2 = _GEN_17; // @[Parameters.scala:270:13]
wire _new_skipProbe_T_5; // @[Parameters.scala:279:117]
assign _new_skipProbe_T_5 = _GEN_17; // @[Parameters.scala:270:13, :279:117]
wire _new_needT_T_3 = new_request_param == 3'h1; // @[Parameters.scala:270:42]
wire _new_needT_T_4 = _new_needT_T_2 & _new_needT_T_3; // @[Parameters.scala:270:{13,33,42}]
wire _new_needT_T_5 = _new_needT_T_1 | _new_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33]
wire _T_615 = new_request_opcode == 3'h6; // @[Parameters.scala:271:14]
wire _new_needT_T_6; // @[Parameters.scala:271:14]
assign _new_needT_T_6 = _T_615; // @[Parameters.scala:271:14]
wire _new_skipProbe_T; // @[Parameters.scala:279:12]
assign _new_skipProbe_T = _T_615; // @[Parameters.scala:271:14, :279:12]
wire _new_needT_T_7 = &new_request_opcode; // @[Parameters.scala:271:52]
wire _new_needT_T_8 = _new_needT_T_6 | _new_needT_T_7; // @[Parameters.scala:271:{14,42,52}]
wire _new_needT_T_9 = |new_request_param; // @[Parameters.scala:271:89]
wire _new_needT_T_10 = _new_needT_T_8 & _new_needT_T_9; // @[Parameters.scala:271:{42,80,89}]
wire new_needT = _new_needT_T_5 | _new_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80]
wire [2:0] new_clientBit_uncommonBits = _new_clientBit_uncommonBits_T[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] _new_clientBit_T = new_request_source[5:3]; // @[Parameters.scala:54:10]
wire _new_clientBit_T_1 = _new_clientBit_T == 3'h4; // @[Parameters.scala:54:{10,32}]
wire _new_clientBit_T_3 = _new_clientBit_T_1; // @[Parameters.scala:54:{32,67}]
wire _new_clientBit_T_4 = new_clientBit_uncommonBits < 3'h5; // @[Parameters.scala:52:56, :57:20]
wire new_clientBit = _new_clientBit_T_3 & _new_clientBit_T_4; // @[Parameters.scala:54:67, :56:48, :57:20]
wire _new_skipProbe_T_1 = &new_request_opcode; // @[Parameters.scala:271:52, :279:50]
wire _new_skipProbe_T_2 = _new_skipProbe_T | _new_skipProbe_T_1; // @[Parameters.scala:279:{12,40,50}]
wire _new_skipProbe_T_3 = new_request_opcode == 3'h4; // @[Parameters.scala:279:87]
wire _new_skipProbe_T_4 = _new_skipProbe_T_2 | _new_skipProbe_T_3; // @[Parameters.scala:279:{40,77,87}]
wire _new_skipProbe_T_7 = _new_skipProbe_T_4; // @[Parameters.scala:279:{77,106}]
wire new_skipProbe = _new_skipProbe_T_7 & new_clientBit; // @[Parameters.scala:56:48]
wire [3:0] prior; // @[MSHR.scala:314:26]
wire _prior_out_T = ~prior_c; // @[MSHR.scala:315:27, :318:32]
wire _prior_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26]
wire [2:0] _prior_out_T_4 = prior_c ? _prior_out_T_2 : _prior_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
assign prior = _prior_T ? {3'h0, _prior_out_T} : _prior_T_1 ? {2'h0, _prior_out_T_1} : _prior_T_2 ? {1'h0, _prior_out_T_4} : {_prior_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26]
wire _T_574 = io_directory_valid_0 | _new_meta_T; // @[MSHR.scala:84:7, :505:40, :539:28]
Generate the Verilog code corresponding to the following Chisel files.
File Error.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.devices.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.resources.SimpleDevice
import freechips.rocketchip.tilelink.{TLArbiter, TLMessages, TLPermissions}
/** Adds a /dev/null slave that generates TL error response messages */
class TLError(params: DevNullParams, buffer: Boolean = true, beatBytes: Int = 4)(implicit p: Parameters)
extends DevNullDevice(params,
minLatency = if (buffer) 1 else 0,
beatBytes, new SimpleDevice("error-device", Seq("sifive,error0")))
{
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
import TLMessages._
import TLPermissions._
val (in, edge) = node.in(0)
val a = if (buffer) {Queue(in.a, 1)} else in.a
val da = Wire(chiselTypeOf(in.d))
val idle = RegInit(true.B)
val a_last = edge.last(a)
val (da_first, da_last, _) = edge.firstlast(da)
assert (idle || da_first) // we only send Grant, never GrantData => simplified flow control below
a.ready := (da.ready && da_last && idle) || !a_last
da.valid := a.valid && a_last && idle
da.bits.opcode := TLMessages.adResponse(a.bits.opcode)
da.bits.param := 0.U // toT, but error grants must be handled transiently (ie: you don't keep permissions)
da.bits.size := a.bits.size
da.bits.source := a.bits.source
da.bits.sink := 0.U
da.bits.denied := true.B
da.bits.data := 0.U
da.bits.corrupt := edge.hasData(da.bits)
if (params.acquire) {
val c = if (buffer) {Queue(in.c, 1)} else in.c
val dc = Wire(chiselTypeOf(in.d))
val c_last = edge.last(c)
val dc_last = edge.last(dc)
// Only allow one Grant in-flight at a time
when (da.fire && da.bits.opcode === Grant) { idle := false.B }
when (in.e.fire) { idle := true.B }
c.ready := (dc.ready && dc_last) || !c_last
dc.valid := c.valid && c_last
// ReleaseAck is not allowed to report failure
dc.bits.opcode := ReleaseAck
dc.bits.param := VecInit(toB, toN, toN)(c.bits.param(1,0))
dc.bits.size := c.bits.size
dc.bits.source := c.bits.source
dc.bits.sink := 0.U
dc.bits.denied := false.B
dc.bits.data := 0.U
dc.bits.corrupt := false.B
// Combine response channels
TLArbiter.lowest(edge, in.d, dc, da)
} else {
in.d <> da
}
// We never probe or issue B requests
in.b.valid := false.B
// Sink GrantAcks
in.e.ready := true.B
}
}
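// A hedged usage sketch (not taken from this file): how a TLError device is typically
// hung off a bus in a LazyModule graph. The DevNullParams argument list and the address
// range below are assumptions for illustration only:
//
//   val errorDevice = LazyModule(new TLError(
//     DevNullParams(Seq(AddressSet(0x3000, 0xfff)), maxAtomic = 8, maxTransfer = 64),
//     beatBytes = 8))
//   errorDevice.node := tlBusNode   // `tlBusNode` is a placeholder outward TL node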
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
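// A hedged sketch of how these node types are used: a pass-through adapter built from
// TLAdapterNode with its default identity client/manager functions, as it might appear
// inside some LazyModule (all surrounding names are placeholders, not part of this file):
//
//   val node = TLAdapterNode()
//   lazy val module = new LazyModuleImp(this) {
//     (node.in zip node.out) foreach { case ((in, _), (out, _)) => out <> in }
//   }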
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
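// Illustrative only: a minimal sketch (hypothetical object and method names) showing how
// the opcode constants above are typically consumed, here to flag D-channel responses
// that carry a data payload.
object TLMessagesExample
{
  def dHasData(opcode: UInt): Bool =
    opcode === TLMessages.AccessAckData || opcode === TLMessages.GrantData
}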
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
 *   (B)ranch: the agent is on an outwards path to the global point of serialization and may hold a read-only copy.
 *   (N)one: the agent holds no copy of the block and no permissions on it.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
  // Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
  def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
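// Illustrative only: a hypothetical helper relating the grow encodings requested on
// channel A (NtoB/NtoT/BtoT) to the cap permission a Grant on channel D could report
// back; a sketch built purely from the constants above, not a canonical mapping.
object TLPermissionsExample
{
  def grantCapFor(grow: UInt): UInt =
    Mux(grow === TLPermissions.NtoB, TLPermissions.toB, TLPermissions.toT)
}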
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
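// Illustrative only: a hypothetical decode of the A-channel param field of an
// ArithmeticData request, built from the constants above.
object TLAtomicsExample
{
  def isMinMax(param: UInt): Bool =
    param === TLAtomics.MIN || param === TLAtomics.MAX ||
    param === TLAtomics.MINU || param === TLAtomics.MAXU
}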
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
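// A hedged usage sketch: `tieoff()` is typically called on a port that should be left
// unconnected so all of its handshake outputs are driven; `edge` below is assumed to be
// a TLEdge supplying the bundle parameters (placeholder names only):
//
//   val tl = IO(new TLBundle(edge.bundle))
//   tl.tieoff()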
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
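// A hedged sketch of a LazyRawModuleImp that forwards an externally supplied clock and
// reset to its lazily instantiated children; the class and port names are hypothetical:
//
//   class ExampleRawImp(wrapper: LazyModule) extends LazyRawModuleImp(wrapper) {
//     override def provideImplicitClockToLazyChildren: Boolean = true
//     val clock_in = IO(Input(Clock()))
//     val reset_in = IO(Input(AsyncReset()))
//     childClock := clock_in
//     childReset := reset_in
//   }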
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
 * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]],
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
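// Illustrative only: Dangles are normally produced by the framework, but a hypothetical
// literal shows how the fields fit together:
//
//   Dangle(source = HalfEdge(serial = 0, index = 0),
//          sink = HalfEdge(serial = 1, index = 0),
//          flipped = false, name = "auto_out", dataOpt = None)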
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
 * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
 *   Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
 *   Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
 *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
 *   Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
 *   specified for a sink according to the protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 *   It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
 *   Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
 *   describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
 *   Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
 *   Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
 *   specified for a source according to the protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
 *   interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
 *   - line `─`: the source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
  /** Debug info of node bindings. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
  /** Debug info of connected ports. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
  /** Debug info of parameter propagation. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
 *   An `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
 *   An `n`-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
 * operators. The algorithm bottoms out when we either reach a node we have already visited or reach a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
  /** Ensure that the same node is not visited twice when resolving `:*=` and related operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
   * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
   * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
* edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolving the star operators depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
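// Worked example (illustrative, not in the original source): if the resolved per-binding edge
// counts are Seq(1, 2, 3), then scanLeft(0)(_ + _) yields Seq(0, 1, 3, 6) and init.zip(tail)
// yields Seq((0, 1), (1, 3), (3, 6)), i.e. the half-open [start, end) edge range owned by each
// binding operator.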
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree.
// Thus, there must exist an Eulerian path and the algorithms below terminate.
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: DontCare unconnected forwarded diplomatic signals for compatibility reasons;
// in the future, we should add an option to decide whether to allow unconnected signals in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: DontCare unconnected forwarded diplomatic signals for compatibility reasons;
// in the future, we should add an option to decide whether to allow unconnected signals in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node's outputs to other nodes' inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections to this node's inputs from other nodes' outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
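// Sketch (assuming the standard `:*=` operator lowers to bind(..., BIND_STAR)): for `x :*= y`,
// x records BIND_STAR on its inward side while y records the mirrored BIND_QUERY on its outward
// side, so when y later resolves its own cardinality the BIND_QUERY makes it read x's resolved
// iStar rather than contributing a star of its own.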
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
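// Illustrative trace (not in the original source): for a 32-byte PutFullData on an 8-byte-wide
// port, numBeats1 = 3, so across the four accepted beats `counter` reads 0, 3, 2, 1; `first` is
// true only on beat 0, `last` and `done` on beat 3, and `count` yields 0, 1, 2, 3.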
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
module TLError( // @[Error.scala:21:9]
input clock, // @[Error.scala:21:9]
input reset, // @[Error.scala:21:9]
output auto_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [6:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [13:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [6:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_corrupt // @[LazyModuleImp.scala:107:25]
);
wire _a_q_io_deq_valid; // @[Decoupled.scala:362:21]
wire [2:0] _a_q_io_deq_bits_opcode; // @[Decoupled.scala:362:21]
wire [3:0] _a_q_io_deq_bits_size; // @[Decoupled.scala:362:21]
wire auto_in_a_valid_0 = auto_in_a_valid; // @[Error.scala:21:9]
wire [2:0] auto_in_a_bits_opcode_0 = auto_in_a_bits_opcode; // @[Error.scala:21:9]
wire [2:0] auto_in_a_bits_param_0 = auto_in_a_bits_param; // @[Error.scala:21:9]
wire [3:0] auto_in_a_bits_size_0 = auto_in_a_bits_size; // @[Error.scala:21:9]
wire [6:0] auto_in_a_bits_source_0 = auto_in_a_bits_source; // @[Error.scala:21:9]
wire [13:0] auto_in_a_bits_address_0 = auto_in_a_bits_address; // @[Error.scala:21:9]
wire [7:0] auto_in_a_bits_mask_0 = auto_in_a_bits_mask; // @[Error.scala:21:9]
wire [63:0] auto_in_a_bits_data_0 = auto_in_a_bits_data; // @[Error.scala:21:9]
wire auto_in_a_bits_corrupt_0 = auto_in_a_bits_corrupt; // @[Error.scala:21:9]
wire auto_in_d_ready_0 = auto_in_d_ready; // @[Error.scala:21:9]
wire [7:0][2:0] _GEN = '{3'h4, 3'h4, 3'h2, 3'h1, 3'h1, 3'h1, 3'h0, 3'h0};
wire auto_in_d_bits_denied = 1'h1; // @[Error.scala:21:9]
wire nodeIn_d_bits_denied = 1'h1; // @[MixedNode.scala:551:17]
wire da_bits_denied = 1'h1; // @[Error.scala:28:18]
wire [1:0] auto_in_d_bits_param = 2'h0; // @[Error.scala:21:9]
wire [1:0] nodeIn_d_bits_param = 2'h0; // @[MixedNode.scala:551:17]
wire [1:0] da_bits_param = 2'h0; // @[Error.scala:28:18]
wire auto_in_d_bits_sink = 1'h0; // @[Error.scala:21:9]
wire nodeIn_d_bits_sink = 1'h0; // @[MixedNode.scala:551:17]
wire da_bits_sink = 1'h0; // @[Error.scala:28:18]
wire [63:0] auto_in_d_bits_data = 64'h0; // @[Error.scala:21:9]
wire [63:0] nodeIn_d_bits_data = 64'h0; // @[MixedNode.scala:551:17]
wire [63:0] da_bits_data = 64'h0; // @[Error.scala:28:18]
wire [2:0] _da_bits_opcode_WIRE_6 = 3'h4; // @[Bundles.scala:47:27]
wire [2:0] _da_bits_opcode_WIRE_7 = 3'h4; // @[Bundles.scala:47:27]
wire [2:0] _da_bits_opcode_WIRE_5 = 3'h2; // @[Bundles.scala:47:27]
wire [2:0] _da_bits_opcode_WIRE_2 = 3'h1; // @[Bundles.scala:47:27]
wire [2:0] _da_bits_opcode_WIRE_3 = 3'h1; // @[Bundles.scala:47:27]
wire [2:0] _da_bits_opcode_WIRE_4 = 3'h1; // @[Bundles.scala:47:27]
wire [2:0] _da_bits_opcode_WIRE_0 = 3'h0; // @[Bundles.scala:47:27]
wire [2:0] _da_bits_opcode_WIRE_1 = 3'h0; // @[Bundles.scala:47:27]
wire nodeIn_a_ready; // @[MixedNode.scala:551:17]
wire nodeIn_a_valid = auto_in_a_valid_0; // @[Error.scala:21:9]
wire [2:0] nodeIn_a_bits_opcode = auto_in_a_bits_opcode_0; // @[Error.scala:21:9]
wire [2:0] nodeIn_a_bits_param = auto_in_a_bits_param_0; // @[Error.scala:21:9]
wire [3:0] nodeIn_a_bits_size = auto_in_a_bits_size_0; // @[Error.scala:21:9]
wire [6:0] nodeIn_a_bits_source = auto_in_a_bits_source_0; // @[Error.scala:21:9]
wire [13:0] nodeIn_a_bits_address = auto_in_a_bits_address_0; // @[Error.scala:21:9]
wire [7:0] nodeIn_a_bits_mask = auto_in_a_bits_mask_0; // @[Error.scala:21:9]
wire [63:0] nodeIn_a_bits_data = auto_in_a_bits_data_0; // @[Error.scala:21:9]
wire nodeIn_a_bits_corrupt = auto_in_a_bits_corrupt_0; // @[Error.scala:21:9]
wire nodeIn_d_ready = auto_in_d_ready_0; // @[Error.scala:21:9]
wire nodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [3:0] nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [6:0] nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire auto_in_a_ready_0; // @[Error.scala:21:9]
wire [2:0] auto_in_d_bits_opcode_0; // @[Error.scala:21:9]
wire [3:0] auto_in_d_bits_size_0; // @[Error.scala:21:9]
wire [6:0] auto_in_d_bits_source_0; // @[Error.scala:21:9]
wire auto_in_d_bits_corrupt_0; // @[Error.scala:21:9]
wire auto_in_d_valid_0; // @[Error.scala:21:9]
assign auto_in_a_ready_0 = nodeIn_a_ready; // @[Error.scala:21:9]
wire da_ready = nodeIn_d_ready; // @[Error.scala:28:18]
wire da_valid; // @[Error.scala:28:18]
assign auto_in_d_valid_0 = nodeIn_d_valid; // @[Error.scala:21:9]
wire [2:0] da_bits_opcode; // @[Error.scala:28:18]
assign auto_in_d_bits_opcode_0 = nodeIn_d_bits_opcode; // @[Error.scala:21:9]
wire [3:0] da_bits_size; // @[Error.scala:28:18]
assign auto_in_d_bits_size_0 = nodeIn_d_bits_size; // @[Error.scala:21:9]
wire [6:0] da_bits_source; // @[Error.scala:28:18]
assign auto_in_d_bits_source_0 = nodeIn_d_bits_source; // @[Error.scala:21:9]
wire da_bits_corrupt; // @[Error.scala:28:18]
assign auto_in_d_bits_corrupt_0 = nodeIn_d_bits_corrupt; // @[Error.scala:21:9]
wire _da_valid_T_1; // @[Error.scala:36:35]
assign nodeIn_d_valid = da_valid; // @[Error.scala:28:18]
assign nodeIn_d_bits_opcode = da_bits_opcode; // @[Error.scala:28:18]
assign nodeIn_d_bits_size = da_bits_size; // @[Error.scala:28:18]
assign nodeIn_d_bits_source = da_bits_source; // @[Error.scala:28:18]
wire da_bits_corrupt_opdata; // @[Edges.scala:106:36]
assign nodeIn_d_bits_corrupt = da_bits_corrupt; // @[Error.scala:28:18]
wire _q_io_deq_ready_T_3; // @[Error.scala:35:46]
wire _a_last_T = _q_io_deq_ready_T_3 & _a_q_io_deq_valid; // @[Decoupled.scala:51:35, :362:21]
wire [26:0] _a_last_beats1_decode_T = 27'hFFF << _a_q_io_deq_bits_size; // @[Decoupled.scala:362:21]
wire [11:0] _a_last_beats1_decode_T_1 = _a_last_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _a_last_beats1_decode_T_2 = ~_a_last_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] a_last_beats1_decode = _a_last_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire _a_last_beats1_opdata_T = _a_q_io_deq_bits_opcode[2]; // @[Decoupled.scala:362:21]
wire a_last_beats1_opdata = ~_a_last_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [8:0] a_last_beats1 = a_last_beats1_opdata ? a_last_beats1_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] a_last_counter; // @[Edges.scala:229:27]
wire [9:0] _a_last_counter1_T = {1'h0, a_last_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] a_last_counter1 = _a_last_counter1_T[8:0]; // @[Edges.scala:230:28]
wire a_last_first = a_last_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _a_last_last_T = a_last_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _a_last_last_T_1 = a_last_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire a_last = _a_last_last_T | _a_last_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_last_done = a_last & _a_last_T; // @[Decoupled.scala:51:35]
wire [8:0] _a_last_count_T = ~a_last_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] a_last_count = a_last_beats1 & _a_last_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _a_last_counter_T = a_last_first ? a_last_beats1 : a_last_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire _T = da_ready & da_valid; // @[Decoupled.scala:51:35]
wire [26:0] _r_beats1_decode_T = 27'hFFF << da_bits_size; // @[package.scala:243:71]
wire [11:0] _r_beats1_decode_T_1 = _r_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _r_beats1_decode_T_2 = ~_r_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] r_beats1_decode = _r_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire r_beats1_opdata = da_bits_opcode[0]; // @[Edges.scala:106:36]
assign da_bits_corrupt_opdata = da_bits_opcode[0]; // @[Edges.scala:106:36]
wire [8:0] r_beats1 = r_beats1_opdata ? r_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] r_counter; // @[Edges.scala:229:27]
wire [9:0] _r_counter1_T = {1'h0, r_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] r_counter1 = _r_counter1_T[8:0]; // @[Edges.scala:230:28]
wire da_first = r_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _r_last_T = r_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _r_last_T_1 = r_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire da_last = _r_last_T | _r_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire r_3 = da_last & _T; // @[Decoupled.scala:51:35]
wire [8:0] _r_count_T = ~r_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] r_4 = r_beats1 & _r_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _r_counter_T = da_first ? r_beats1 : r_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire _q_io_deq_ready_T = da_ready & da_last; // @[Edges.scala:232:33]
wire _q_io_deq_ready_T_1 = _q_io_deq_ready_T; // @[Error.scala:35:{26,37}]
wire _q_io_deq_ready_T_2 = ~a_last; // @[Edges.scala:232:33]
assign _q_io_deq_ready_T_3 = _q_io_deq_ready_T_1 | _q_io_deq_ready_T_2; // @[Error.scala:35:{37,46,49}]
wire _da_valid_T = _a_q_io_deq_valid & a_last; // @[Decoupled.scala:362:21]
assign _da_valid_T_1 = _da_valid_T; // @[Error.scala:36:{25,35}]
assign da_valid = _da_valid_T_1; // @[Error.scala:28:18, :36:35]
assign da_bits_opcode = _GEN[_a_q_io_deq_bits_opcode]; // @[Decoupled.scala:362:21]
assign da_bits_corrupt = da_bits_corrupt_opdata; // @[Edges.scala:106:36]
always @(posedge clock) begin // @[Error.scala:21:9]
if (reset) begin // @[Error.scala:21:9]
a_last_counter <= 9'h0; // @[Edges.scala:229:27]
r_counter <= 9'h0; // @[Edges.scala:229:27]
end
else begin // @[Error.scala:21:9]
if (_a_last_T) // @[Decoupled.scala:51:35]
a_last_counter <= _a_last_counter_T; // @[Edges.scala:229:27, :236:21]
if (_T) // @[Decoupled.scala:51:35]
r_counter <= _r_counter_T; // @[Edges.scala:229:27, :236:21]
end
end // always @(posedge)
TLMonitor_19 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (nodeIn_a_ready), // @[MixedNode.scala:551:17]
.io_in_a_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_in_a_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_a_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_in_a_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_in_a_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_in_a_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_in_a_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_in_a_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_in_a_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_d_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_in_d_valid (nodeIn_d_valid), // @[MixedNode.scala:551:17]
.io_in_d_bits_opcode (nodeIn_d_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_d_bits_size (nodeIn_d_bits_size), // @[MixedNode.scala:551:17]
.io_in_d_bits_source (nodeIn_d_bits_source), // @[MixedNode.scala:551:17]
.io_in_d_bits_corrupt (nodeIn_d_bits_corrupt) // @[MixedNode.scala:551:17]
); // @[Nodes.scala:27:25]
Queue1_TLBundleA_a14d64s7k1z4u a_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeIn_a_ready),
.io_enq_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_enq_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_enq_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_enq_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_enq_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_enq_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_enq_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_enq_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_enq_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_deq_ready (_q_io_deq_ready_T_3), // @[Error.scala:35:46]
.io_deq_valid (_a_q_io_deq_valid),
.io_deq_bits_opcode (_a_q_io_deq_bits_opcode),
.io_deq_bits_size (_a_q_io_deq_bits_size),
.io_deq_bits_source (da_bits_source)
); // @[Decoupled.scala:362:21]
assign da_bits_size = _a_q_io_deq_bits_size; // @[Decoupled.scala:362:21]
assign auto_in_a_ready = auto_in_a_ready_0; // @[Error.scala:21:9]
assign auto_in_d_valid = auto_in_d_valid_0; // @[Error.scala:21:9]
assign auto_in_d_bits_opcode = auto_in_d_bits_opcode_0; // @[Error.scala:21:9]
assign auto_in_d_bits_size = auto_in_d_bits_size_0; // @[Error.scala:21:9]
assign auto_in_d_bits_source = auto_in_d_bits_source_0; // @[Error.scala:21:9]
assign auto_in_d_bits_corrupt = auto_in_d_bits_corrupt_0; // @[Error.scala:21:9]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File Serdes.scala:
package testchipip.serdes
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config._
class GenericSerializer[T <: Data](t: T, flitWidth: Int) extends Module {
override def desiredName = s"GenericSerializer_${t.typeName}w${t.getWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(t))
val out = Decoupled(new Flit(flitWidth))
val busy = Output(Bool())
})
val dataBits = t.getWidth.max(flitWidth)
val dataBeats = (dataBits - 1) / flitWidth + 1
require(dataBeats >= 1)
val data = Reg(Vec(dataBeats, UInt(flitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready && beat === 0.U
io.out.valid := io.in.valid || beat =/= 0.U
io.out.bits.flit := Mux(beat === 0.U, io.in.bits.asUInt, data(beat))
when (io.out.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) {
data := io.in.bits.asTypeOf(Vec(dataBeats, UInt(flitWidth.W)))
data(0) := DontCare // unused, DCE this
}
}
io.busy := io.out.valid
}
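// Usage sketch (illustrative, not part of the original file): for t = UInt(64.W) and
// flitWidth = 16, dataBeats = 4; beat 0 sends the least-significant flit of io.in.bits directly
// (latching the rest into `data`), beats 1-3 drain data(1)..data(3), and io.in is only consumed
// while beat === 0.
// val ser = Module(new GenericSerializer(UInt(64.W), 16))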
class GenericDeserializer[T <: Data](t: T, flitWidth: Int) extends Module {
override def desiredName = s"GenericDeserializer_${t.typeName}w${t.getWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(t)
val busy = Output(Bool())
})
val dataBits = t.getWidth.max(flitWidth)
val dataBeats = (dataBits - 1) / flitWidth + 1
require(dataBeats >= 1)
val data = Reg(Vec(dataBeats-1, UInt(flitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready || beat =/= (dataBeats-1).U
io.out.valid := io.in.valid && beat === (dataBeats-1).U
io.out.bits := (if (dataBeats == 1) {
io.in.bits.flit.asTypeOf(t)
} else {
Cat(io.in.bits.flit, data.asUInt).asTypeOf(t)
})
when (io.in.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
if (dataBeats > 1) {
when (beat =/= (dataBeats-1).U) {
data(beat(log2Ceil(dataBeats-1)-1,0)) := io.in.bits.flit
}
}
}
io.busy := beat =/= 0.U
}
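// Illustrative note (not part of the original file): with the same t = UInt(64.W), flitWidth = 16
// parameters, the first three flits are captured into data(0..2) and io.out only fires together
// with the fourth flit, reassembled as Cat(flit3, flit2, flit1, flit0) (least-significant flit
// first), matching GenericSerializer above.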
class FlitToPhit(flitWidth: Int, phitWidth: Int) extends Module {
override def desiredName = s"FlitToPhit_f${flitWidth}_p${phitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Phit(phitWidth))
})
require(flitWidth >= phitWidth)
val dataBeats = (flitWidth - 1) / phitWidth + 1
val data = Reg(Vec(dataBeats-1, UInt(phitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready && beat === 0.U
io.out.valid := io.in.valid || beat =/= 0.U
io.out.bits.phit := (if (dataBeats == 1) io.in.bits.flit else Mux(beat === 0.U, io.in.bits.flit, data(beat-1.U)))
when (io.out.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) {
data := io.in.bits.asTypeOf(Vec(dataBeats, UInt(phitWidth.W))).tail
}
}
}
object FlitToPhit {
def apply(flit: DecoupledIO[Flit], phitWidth: Int): DecoupledIO[Phit] = {
val flit2phit = Module(new FlitToPhit(flit.bits.flitWidth, phitWidth))
flit2phit.io.in <> flit
flit2phit.io.out
}
}
class PhitToFlit(flitWidth: Int, phitWidth: Int) extends Module {
override def desiredName = s"PhitToFlit_p${phitWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Phit(phitWidth)))
val out = Decoupled(new Flit(flitWidth))
})
require(flitWidth >= phitWidth)
val dataBeats = (flitWidth - 1) / phitWidth + 1
val data = Reg(Vec(dataBeats-1, UInt(phitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready || beat =/= (dataBeats-1).U
io.out.valid := io.in.valid && beat === (dataBeats-1).U
io.out.bits.flit := (if (dataBeats == 1) io.in.bits.phit else Cat(io.in.bits.phit, data.asUInt))
when (io.in.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
if (dataBeats > 1) {
when (beat =/= (dataBeats-1).U) {
data(beat) := io.in.bits.phit
}
}
}
}
object PhitToFlit {
def apply(phit: DecoupledIO[Phit], flitWidth: Int): DecoupledIO[Flit] = {
val phit2flit = Module(new PhitToFlit(flitWidth, phit.bits.phitWidth))
phit2flit.io.in <> phit
phit2flit.io.out
}
def apply(phit: ValidIO[Phit], flitWidth: Int): ValidIO[Flit] = {
val phit2flit = Module(new PhitToFlit(flitWidth, phit.bits.phitWidth))
phit2flit.io.in.valid := phit.valid
phit2flit.io.in.bits := phit.bits
when (phit.valid) { assert(phit2flit.io.in.ready) }
val out = Wire(Valid(new Flit(flitWidth)))
out.valid := phit2flit.io.out.valid
out.bits := phit2flit.io.out.bits
phit2flit.io.out.ready := true.B
out
}
}
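// Illustrative usage sketch (not part of the original file; module and signal names are
// hypothetical): FlitToPhit and PhitToFlit are inverses, so narrowing a flit stream to
// phits and widening it back preserves the payload.
class FlitPhitLoopbackExample extends Module {
  val io = IO(new Bundle {
    val in  = Flipped(Decoupled(new Flit(32)))
    val out = Decoupled(new Flit(32))
  })
  // Each 32-bit flit is cut into 32/8 = 4 phits, then reassembled on the other side.
  io.out <> PhitToFlit(FlitToPhit(io.in, 8), 32)
}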
class PhitArbiter(phitWidth: Int, flitWidth: Int, channels: Int) extends Module {
override def desiredName = s"PhitArbiter_p${phitWidth}_f${flitWidth}_n${channels}"
val io = IO(new Bundle {
val in = Flipped(Vec(channels, Decoupled(new Phit(phitWidth))))
val out = Decoupled(new Phit(phitWidth))
})
if (channels == 1) {
io.out <> io.in(0)
} else {
val headerWidth = log2Ceil(channels)
val headerBeats = (headerWidth - 1) / phitWidth + 1
val flitBeats = (flitWidth - 1) / phitWidth + 1
val beats = headerBeats + flitBeats
val beat = RegInit(0.U(log2Ceil(beats).W))
val chosen_reg = Reg(UInt(headerWidth.W))
val chosen_prio = PriorityEncoder(io.in.map(_.valid))
val chosen = Mux(beat === 0.U, chosen_prio, chosen_reg)
val header_idx = if (headerBeats == 1) 0.U else beat(log2Ceil(headerBeats)-1,0)
io.out.valid := VecInit(io.in.map(_.valid))(chosen)
io.out.bits.phit := Mux(beat < headerBeats.U,
chosen.asTypeOf(Vec(headerBeats, UInt(phitWidth.W)))(header_idx),
VecInit(io.in.map(_.bits.phit))(chosen))
for (i <- 0 until channels) {
io.in(i).ready := io.out.ready && beat >= headerBeats.U && chosen_reg === i.U
}
when (io.out.fire) {
beat := Mux(beat === (beats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) { chosen_reg := chosen_prio }
}
}
}
class PhitDemux(phitWidth: Int, flitWidth: Int, channels: Int) extends Module {
override def desiredName = s"PhitDemux_p${phitWidth}_f${flitWidth}_n${channels}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Phit(phitWidth)))
val out = Vec(channels, Decoupled(new Phit(phitWidth)))
})
if (channels == 1) {
io.out(0) <> io.in
} else {
val headerWidth = log2Ceil(channels)
val headerBeats = (headerWidth - 1) / phitWidth + 1
val flitBeats = (flitWidth - 1) / phitWidth + 1
val beats = headerBeats + flitBeats
val beat = RegInit(0.U(log2Ceil(beats).W))
val channel_vec = Reg(Vec(headerBeats, UInt(phitWidth.W)))
val channel = channel_vec.asUInt(log2Ceil(channels)-1,0)
val header_idx = if (headerBeats == 1) 0.U else beat(log2Ceil(headerBeats)-1,0)
io.in.ready := beat < headerBeats.U || VecInit(io.out.map(_.ready))(channel)
for (c <- 0 until channels) {
io.out(c).valid := io.in.valid && beat >= headerBeats.U && channel === c.U
io.out(c).bits.phit := io.in.bits.phit
}
when (io.in.fire) {
beat := Mux(beat === (beats-1).U, 0.U, beat + 1.U)
when (beat < headerBeats.U) {
channel_vec(header_idx) := io.in.bits.phit
}
}
}
}
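// Editor's note (illustrative, not part of the original file): worked example of the
// framing used by PhitArbiter and PhitDemux above, assuming phitWidth = 8,
// flitWidth = 32 and channels = 4:
//   headerWidth = log2Ceil(4) = 2,  headerBeats = (2 - 1) / 8 + 1 = 1
//   flitBeats   = (32 - 1) / 8 + 1 = 4,  beats = 1 + 4 = 5
// so each flit travels as one header phit carrying the channel index followed by four
// payload phits, and the demux steers the payload using the latched header.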
class DecoupledFlitToCreditedFlit(flitWidth: Int, bufferSz: Int) extends Module {
override def desiredName = s"DecoupledFlitToCreditedFlit_f${flitWidth}_b${bufferSz}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Flit(flitWidth))
val credit = Flipped(Decoupled(new Flit(flitWidth)))
})
val creditWidth = log2Ceil(bufferSz)
require(creditWidth <= flitWidth)
val credits = RegInit(0.U((creditWidth+1).W))
val credit_incr = io.out.fire
val credit_decr = io.credit.fire
when (credit_incr || credit_decr) {
credits := credits + credit_incr - Mux(io.credit.valid, io.credit.bits.flit +& 1.U, 0.U)
}
io.out.valid := io.in.valid && credits < bufferSz.U
io.out.bits.flit := io.in.bits.flit
io.in.ready := io.out.ready && credits < bufferSz.U
io.credit.ready := true.B
}
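// Editor's note (illustrative, not part of the original file): sketch of the credit
// accounting above, assuming bufferSz = 4. `credits` counts flits believed to occupy
// the receiver's buffer: it increments on every forwarded flit, forwarding stalls once
// credits reaches bufferSz, and an arriving credit flit with value v releases v + 1
// entries at once (so a returned value of 2 frees three slots). The matching receiver
// below, CreditedFlitToDecoupledFlit, accumulates freed entries in its own `credits`
// register and clears it once the credit flit is accepted.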
class CreditedFlitToDecoupledFlit(flitWidth: Int, bufferSz: Int) extends Module {
override def desiredName = s"CreditedFlitToDecoupledFlit_f${flitWidth}_b${bufferSz}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Flit(flitWidth))
val credit = Decoupled(new Flit(flitWidth))
})
val creditWidth = log2Ceil(bufferSz)
require(creditWidth <= flitWidth)
val buffer = Module(new Queue(new Flit(flitWidth), bufferSz))
val credits = RegInit(0.U((creditWidth+1).W))
val credit_incr = buffer.io.deq.fire
val credit_decr = io.credit.fire
when (credit_incr || credit_decr) {
credits := credit_incr + Mux(credit_decr, 0.U, credits)
}
buffer.io.enq.valid := io.in.valid
buffer.io.enq.bits := io.in.bits
io.in.ready := true.B
when (io.in.valid) { assert(buffer.io.enq.ready) }
io.out <> buffer.io.deq
io.credit.valid := credits =/= 0.U
io.credit.bits.flit := credits - 1.U
}
| module FlitToPhit_f32_p32_4( // @[Serdes.scala:71:7]
input clock, // @[Serdes.scala:71:7]
input reset, // @[Serdes.scala:71:7]
output io_in_ready, // @[Serdes.scala:73:14]
input io_in_valid, // @[Serdes.scala:73:14]
input [31:0] io_in_bits_flit, // @[Serdes.scala:73:14]
input io_out_ready, // @[Serdes.scala:73:14]
output io_out_valid, // @[Serdes.scala:73:14]
output [31:0] io_out_bits_phit // @[Serdes.scala:73:14]
);
wire io_in_valid_0 = io_in_valid; // @[Serdes.scala:71:7]
wire [31:0] io_in_bits_flit_0 = io_in_bits_flit; // @[Serdes.scala:71:7]
wire io_out_ready_0 = io_out_ready; // @[Serdes.scala:71:7]
wire [1:0] _beat_T_1 = 2'h1; // @[Serdes.scala:88:53]
wire _io_in_ready_T = 1'h1; // @[Serdes.scala:83:39]
wire _beat_T = 1'h1; // @[Serdes.scala:88:22]
wire _beat_T_2 = 1'h1; // @[Serdes.scala:88:53]
wire _io_in_ready_T_1; // @[Serdes.scala:83:31]
wire _io_out_valid_T = 1'h0; // @[Serdes.scala:84:39]
wire _beat_T_3 = 1'h0; // @[Serdes.scala:88:16]
wire _io_out_valid_T_1 = io_in_valid_0; // @[Serdes.scala:71:7, :84:31]
wire [31:0] io_out_bits_phit_0 = io_in_bits_flit_0; // @[Serdes.scala:71:7]
assign _io_in_ready_T_1 = io_out_ready_0; // @[Serdes.scala:71:7, :83:31]
wire io_in_ready_0; // @[Serdes.scala:71:7]
wire io_out_valid_0; // @[Serdes.scala:71:7]
assign io_in_ready_0 = _io_in_ready_T_1; // @[Serdes.scala:71:7, :83:31]
assign io_out_valid_0 = _io_out_valid_T_1; // @[Serdes.scala:71:7, :84:31]
assign io_in_ready = io_in_ready_0; // @[Serdes.scala:71:7]
assign io_out_valid = io_out_valid_0; // @[Serdes.scala:71:7]
assign io_out_bits_phit = io_out_bits_phit_0; // @[Serdes.scala:71:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File FIFOFixer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.diplomacy.RegionType
import freechips.rocketchip.util.property
class TLFIFOFixer(policy: TLFIFOFixer.Policy = TLFIFOFixer.all)(implicit p: Parameters) extends LazyModule
{
private def fifoMap(seq: Seq[TLSlaveParameters]) = {
val (flatManagers, keepManagers) = seq.partition(policy)
// We need to be careful if one flatManager and one keepManager share an existing domain
// Erring on the side of caution, we will also flatten the keepManager in this case
val flatDomains = Set(flatManagers.flatMap(_.fifoId):_*) // => ID 0
val keepDomains = Set(keepManagers.flatMap(_.fifoId):_*) -- flatDomains // => IDs compacted
// Calculate what the FIFO domains look like after the fixer is applied
val flatMap = flatDomains.map { x => (x, 0) }.toMap
val keepMap = keepDomains.scanLeft((-1,0)) { case ((_,s),x) => (x, s+1) }.toMap
val map = flatMap ++ keepMap
val fixMap = seq.map { m => m.fifoId match {
case None => if (policy(m)) Some(0) else None
case Some(id) => Some(map(id)) // also flattens some who did not ask
} }
// Compress the FIFO domain space of those we are combining
val reMap = flatDomains.scanLeft((-1,-1)) { case ((_,s),x) => (x, s+1) }.toMap
val splatMap = seq.map { m => m.fifoId match {
case None => None
case Some(id) => reMap.lift(id)
} }
(fixMap, splatMap)
}
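// Worked example (editor's illustration, not in the original source): with three slaves
// whose fifoIds are Some(0), Some(1) and None, and a policy matching only the first two,
// flatDomains = Set(0, 1) and keepDomains is empty, so
//   fixMap   = Seq(Some(0), Some(0), None)  // both matched slaves collapse into domain 0
//   splatMap = Seq(Some(0), Some(1), None)  // compacted ids later used by findFifoIdFast
// i.e. requests to the first two slaves are serialized as one FIFO domain while the
// third slave is left untouched.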
val node = new AdapterNode(TLImp)(
{ cp => cp },
{ mp =>
val (fixMap, _) = fifoMap(mp.managers)
mp.v1copy(managers = (fixMap zip mp.managers) map { case (id, m) => m.v1copy(fifoId = id) })
}) with TLFormatNode {
override def circuitIdentity = edges.in.map(_.client.clients.filter(c => c.requestFifo && c.sourceId.size > 1).size).sum == 0
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
val (fixMap, splatMap) = fifoMap(edgeOut.manager.managers)
// Do we need to serialize the request to this manager?
val a_notFIFO = edgeIn.manager.fastProperty(in.a.bits.address, _.fifoId != Some(0), (b:Boolean) => b.B)
// Compact the IDs of the cases we serialize
val compacted = ((fixMap zip splatMap) zip edgeOut.manager.managers) flatMap {
case ((f, s), m) => if (f == Some(0)) Some(m.v1copy(fifoId = s)) else None
}
val sinks = if (compacted.exists(_.supportsAcquireB)) edgeOut.manager.endSinkId else 0
val a_id = if (compacted.isEmpty) 0.U else
edgeOut.manager.v1copy(managers = compacted, endSinkId = sinks).findFifoIdFast(in.a.bits.address)
val a_noDomain = a_id === 0.U
if (false) {
println(s"FIFOFixer for: ${edgeIn.client.clients.map(_.name).mkString(", ")}")
println(s"make FIFO: ${edgeIn.manager.managers.filter(_.fifoId==Some(0)).map(_.name).mkString(", ")}")
println(s"not FIFO: ${edgeIn.manager.managers.filter(_.fifoId!=Some(0)).map(_.name).mkString(", ")}")
println(s"domains: ${compacted.groupBy(_.name).mapValues(_.map(_.fifoId))}")
println("")
}
// Count beats
val a_first = edgeIn.first(in.a)
val d_first = edgeOut.first(out.d) && out.d.bits.opcode =/= TLMessages.ReleaseAck
// Keep one bit for each source recording if there is an outstanding request that must be made FIFO
// Sources unused in the stall signal calculation should be pruned by DCE
val flight = RegInit(VecInit(Seq.fill(edgeIn.client.endSourceId) { false.B }))
when (a_first && in.a.fire) { flight(in.a.bits.source) := !a_notFIFO }
when (d_first && in.d.fire) { flight(in.d.bits.source) := false.B }
val stalls = edgeIn.client.clients.filter(c => c.requestFifo && c.sourceId.size > 1).map { c =>
val a_sel = c.sourceId.contains(in.a.bits.source)
val id = RegEnable(a_id, in.a.fire && a_sel && !a_notFIFO)
val track = flight.slice(c.sourceId.start, c.sourceId.end)
a_sel && a_first && track.reduce(_ || _) && (a_noDomain || id =/= a_id)
}
val stall = stalls.foldLeft(false.B)(_||_)
out.a <> in.a
in.d <> out.d
out.a.valid := in.a.valid && (a_notFIFO || !stall)
in.a.ready := out.a.ready && (a_notFIFO || !stall)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> out.b
out.c <> in .c
out.e <> in .e
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
//Functional cover properties
property.cover(in.a.valid && stall, "COVER FIFOFIXER STALL", "Cover: Stall occured for a valid transaction")
val SourceIdFIFOed = RegInit(0.U(edgeIn.client.endSourceId.W))
val SourceIdSet = WireDefault(0.U(edgeIn.client.endSourceId.W))
val SourceIdClear = WireDefault(0.U(edgeIn.client.endSourceId.W))
when (a_first && in.a.fire && !a_notFIFO) {
SourceIdSet := UIntToOH(in.a.bits.source)
}
when (d_first && in.d.fire) {
SourceIdClear := UIntToOH(in.d.bits.source)
}
SourceIdFIFOed := SourceIdFIFOed | SourceIdSet
val allIDs_FIFOed = SourceIdFIFOed===Fill(SourceIdFIFOed.getWidth, 1.U)
property.cover(allIDs_FIFOed, "COVER all sources", "Cover: FIFOFIXER covers all Source IDs")
//property.cover(flight.reduce(_ && _), "COVER full", "Cover: FIFO is full with all Source IDs")
property.cover(!(flight.reduce(_ || _)), "COVER empty", "Cover: FIFO is empty")
property.cover(SourceIdSet > 0.U, "COVER at least one push", "Cover: At least one Source ID is pushed")
property.cover(SourceIdClear > 0.U, "COVER at least one pop", "Cover: At least one Source ID is popped")
}
}
}
object TLFIFOFixer
{
// Which slaves should have their FIFOness combined?
// NOTE: this transformation is still only applied for masters with requestFifo
type Policy = TLSlaveParameters => Boolean
import RegionType._
val all: Policy = m => true
val allFIFO: Policy = m => m.fifoId.isDefined
val allVolatile: Policy = m => m.regionType <= VOLATILE
def apply(policy: Policy = all)(implicit p: Parameters): TLNode =
{
val fixer = LazyModule(new TLFIFOFixer(policy))
fixer.node
}
}
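// Illustrative usage sketch (editor's addition, not in the original source; node names
// are hypothetical): like the other adapters in this file set, the fixer is spliced into
// a TileLink path inside a LazyModule with an implicit Parameters in scope, e.g.
//   sramNode := TLFIFOFixer(TLFIFOFixer.allVolatile) := dmaNode
// With `allVolatile`, only slaves whose regionType is at most VOLATILE have their FIFO
// domains flattened to domain 0; the remaining slaves keep their own (compacted) ids.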
File ClockDomain.scala:
package freechips.rocketchip.prci
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
abstract class Domain(implicit p: Parameters) extends LazyModule with HasDomainCrossing
{
def clockBundle: ClockBundle
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
childClock := clockBundle.clock
childReset := clockBundle.reset
override def provideImplicitClockToLazyChildren = true
// these are just for backwards compatibility with external devices
// that were manually wiring themselves to the domain's clock/reset input:
val clock = IO(Output(chiselTypeOf(clockBundle.clock)))
val reset = IO(Output(chiselTypeOf(clockBundle.reset)))
clock := clockBundle.clock
reset := clockBundle.reset
}
}
abstract class ClockDomain(implicit p: Parameters) extends Domain with HasClockDomainCrossing
class ClockSinkDomain(val clockSinkParams: ClockSinkParameters)(implicit p: Parameters) extends ClockDomain
{
def this(take: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSinkParameters(take = take, name = name))
val clockNode = ClockSinkNode(Seq(clockSinkParams))
def clockBundle = clockNode.in.head._1
override lazy val desiredName = (clockSinkParams.name.toSeq :+ "ClockSinkDomain").mkString
}
class ClockSourceDomain(val clockSourceParams: ClockSourceParameters)(implicit p: Parameters) extends ClockDomain
{
def this(give: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSourceParameters(give = give, name = name))
val clockNode = ClockSourceNode(Seq(clockSourceParams))
def clockBundle = clockNode.out.head._1
override lazy val desiredName = (clockSourceParams.name.toSeq :+ "ClockSourceDomain").mkString
}
abstract class ResetDomain(implicit p: Parameters) extends Domain with HasResetDomainCrossing
File ClockGroup.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.prci
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.resources.FixedClockResource
case class ClockGroupingNode(groupName: String)(implicit valName: ValName)
extends MixedNexusNode(ClockGroupImp, ClockImp)(
dFn = { _ => ClockSourceParameters() },
uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq) })
{
override def circuitIdentity = outputs.size == 1
}
class ClockGroup(groupName: String)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupingNode(groupName)
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in(0)
val (out, _) = node.out.unzip
require (node.in.size == 1)
require (in.member.size == out.size)
(in.member.data zip out) foreach { case (i, o) => o := i }
}
}
object ClockGroup
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroup(valName.name)).node
}
case class ClockGroupAggregateNode(groupName: String)(implicit valName: ValName)
extends NexusNode(ClockGroupImp)(
dFn = { _ => ClockGroupSourceParameters() },
uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq.flatMap(_.members))})
{
override def circuitIdentity = outputs.size == 1
}
class ClockGroupAggregator(groupName: String)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupAggregateNode(groupName)
override lazy val desiredName = s"ClockGroupAggregator_$groupName"
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in.unzip
val (out, _) = node.out.unzip
val outputs = out.flatMap(_.member.data)
require (node.in.size == 1, s"Aggregator for groupName: ${groupName} had ${node.in.size} inward edges instead of 1")
require (in.head.member.size == outputs.size)
in.head.member.data.zip(outputs).foreach { case (i, o) => o := i }
}
}
object ClockGroupAggregator
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroupAggregator(valName.name)).node
}
class SimpleClockGroupSource(numSources: Int = 1)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupSourceNode(List.fill(numSources) { ClockGroupSourceParameters() })
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
val (out, _) = node.out.unzip
out.map { out: ClockGroupBundle =>
out.member.data.foreach { o =>
o.clock := clock; o.reset := reset }
}
}
}
object SimpleClockGroupSource
{
def apply(num: Int = 1)(implicit p: Parameters, valName: ValName) = LazyModule(new SimpleClockGroupSource(num)).node
}
case class FixedClockBroadcastNode(fixedClockOpt: Option[ClockParameters])(implicit valName: ValName)
extends NexusNode(ClockImp)(
dFn = { seq => fixedClockOpt.map(_ => ClockSourceParameters(give = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSourceParameters()) },
uFn = { seq => fixedClockOpt.map(_ => ClockSinkParameters(take = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSinkParameters()) },
inputRequiresOutput = false) {
def fixedClockResources(name: String, prefix: String = "soc/"): Seq[Option[FixedClockResource]] = Seq(fixedClockOpt.map(t => new FixedClockResource(name, t.freqMHz, prefix)))
}
class FixedClockBroadcast(fixedClockOpt: Option[ClockParameters])(implicit p: Parameters) extends LazyModule
{
val node = new FixedClockBroadcastNode(fixedClockOpt) {
override def circuitIdentity = outputs.size == 1
}
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in(0)
val (out, _) = node.out.unzip
override def desiredName = s"FixedClockBroadcast_${out.size}"
require (node.in.size == 1, "FixedClockBroadcast can only broadcast a single clock")
out.foreach { _ := in }
}
}
object FixedClockBroadcast
{
def apply(fixedClockOpt: Option[ClockParameters] = None)(implicit p: Parameters, valName: ValName) = LazyModule(new FixedClockBroadcast(fixedClockOpt)).node
}
case class PRCIClockGroupNode()(implicit valName: ValName)
extends NexusNode(ClockGroupImp)(
dFn = { _ => ClockGroupSourceParameters() },
uFn = { _ => ClockGroupSinkParameters("prci", Nil) },
outputRequiresInput = false)
File WidthWidget.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.AddressSet
import freechips.rocketchip.util.{Repeater, UIntToOH1}
// innerBeatBytes => the new client-facing bus width
class TLWidthWidget(innerBeatBytes: Int)(implicit p: Parameters) extends LazyModule
{
private def noChangeRequired(manager: TLManagerPortParameters) = manager.beatBytes == innerBeatBytes
val node = new TLAdapterNode(
clientFn = { case c => c },
managerFn = { case m => m.v1copy(beatBytes = innerBeatBytes) }){
override def circuitIdentity = edges.out.map(_.manager).forall(noChangeRequired)
}
override lazy val desiredName = s"TLWidthWidget$innerBeatBytes"
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def merge[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T]) = {
val inBytes = edgeIn.manager.beatBytes
val outBytes = edgeOut.manager.beatBytes
val ratio = outBytes / inBytes
val keepBits = log2Ceil(outBytes)
val dropBits = log2Ceil(inBytes)
val countBits = log2Ceil(ratio)
val size = edgeIn.size(in.bits)
val hasData = edgeIn.hasData(in.bits)
val limit = UIntToOH1(size, keepBits) >> dropBits
val count = RegInit(0.U(countBits.W))
val first = count === 0.U
val last = count === limit || !hasData
val enable = Seq.tabulate(ratio) { i => !((count ^ i.U) & limit).orR }
val corrupt_reg = RegInit(false.B)
val corrupt_in = edgeIn.corrupt(in.bits)
val corrupt_out = corrupt_in || corrupt_reg
when (in.fire) {
count := count + 1.U
corrupt_reg := corrupt_out
when (last) {
count := 0.U
corrupt_reg := false.B
}
}
def helper(idata: UInt): UInt = {
// rdata is X until the first time a multi-beat write occurs.
// Prevent the X from leaking outside by jamming the mux control until
// the first time rdata is written (and hence no longer X).
val rdata_written_once = RegInit(false.B)
val masked_enable = enable.map(_ || !rdata_written_once)
val odata = Seq.fill(ratio) { WireInit(idata) }
val rdata = Reg(Vec(ratio-1, chiselTypeOf(idata)))
val pdata = rdata :+ idata
val mdata = (masked_enable zip (odata zip pdata)) map { case (e, (o, p)) => Mux(e, o, p) }
when (in.fire && !last) {
rdata_written_once := true.B
(rdata zip mdata) foreach { case (r, m) => r := m }
}
Cat(mdata.reverse)
}
in.ready := out.ready || !last
out.valid := in.valid && last
out.bits := in.bits
// Don't put down hardware if we never carry data
edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits)))
edgeOut.corrupt(out.bits) := corrupt_out
(out.bits, in.bits) match {
case (o: TLBundleA, i: TLBundleA) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W))
case (o: TLBundleB, i: TLBundleB) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W))
case (o: TLBundleC, i: TLBundleC) => ()
case (o: TLBundleD, i: TLBundleD) => ()
case _ => require(false, "Impossible bundle combination in WidthWidget")
}
}
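// Worked example (editor's illustration, not in the original source) for merge with
// inBytes = 4 and outBytes = 16 (ratio = 4, keepBits = 4, dropBits = 2): an 8-byte
// request has size = 3, so
//   limit = UIntToOH1(3, 4) >> 2 = 0b0111 >> 2 = 1
// and two 4-byte input beats (count = 0, 1) are gathered into a single 16-byte output
// beat, with `enable` steering each input beat into its byte lanes and unused lanes
// masked off through edgeOut.mask.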
def split[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = {
val inBytes = edgeIn.manager.beatBytes
val outBytes = edgeOut.manager.beatBytes
val ratio = inBytes / outBytes
val keepBits = log2Ceil(inBytes)
val dropBits = log2Ceil(outBytes)
val countBits = log2Ceil(ratio)
val size = edgeIn.size(in.bits)
val hasData = edgeIn.hasData(in.bits)
val limit = UIntToOH1(size, keepBits) >> dropBits
val count = RegInit(0.U(countBits.W))
val first = count === 0.U
val last = count === limit || !hasData
when (out.fire) {
count := count + 1.U
when (last) { count := 0.U }
}
// For sub-beat transfer, extract which part matters
val sel = in.bits match {
case a: TLBundleA => a.address(keepBits-1, dropBits)
case b: TLBundleB => b.address(keepBits-1, dropBits)
case c: TLBundleC => c.address(keepBits-1, dropBits)
case d: TLBundleD => {
val sel = sourceMap(d.source)
val hold = Mux(first, sel, RegEnable(sel, first)) // a_first is not for whole xfer
hold & ~limit // if more than one a_first/xfer, the address must be aligned anyway
}
}
val index = sel | count
def helper(idata: UInt, width: Int): UInt = {
val mux = VecInit.tabulate(ratio) { i => idata((i+1)*outBytes*width-1, i*outBytes*width) }
mux(index)
}
out.bits := in.bits
out.valid := in.valid
in.ready := out.ready
// Don't put down hardware if we never carry data
edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits), 8))
(out.bits, in.bits) match {
case (o: TLBundleA, i: TLBundleA) => o.mask := helper(i.mask, 1)
case (o: TLBundleB, i: TLBundleB) => o.mask := helper(i.mask, 1)
case (o: TLBundleC, i: TLBundleC) => () // replicating corrupt to all beats is ok
case (o: TLBundleD, i: TLBundleD) => ()
case _ => require(false, "Impossible bundle combination in WidthWidget")
}
// Repeat the input if we're not last
!last
}
def splice[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = {
if (edgeIn.manager.beatBytes == edgeOut.manager.beatBytes) {
// nothing to do; pass it through
out.bits := in.bits
out.valid := in.valid
in.ready := out.ready
} else if (edgeIn.manager.beatBytes > edgeOut.manager.beatBytes) {
// split input to output
val repeat = Wire(Bool())
val repeated = Repeater(in, repeat)
val cated = Wire(chiselTypeOf(repeated))
cated <> repeated
edgeIn.data(cated.bits) := Cat(
edgeIn.data(repeated.bits)(edgeIn.manager.beatBytes*8-1, edgeOut.manager.beatBytes*8),
edgeIn.data(in.bits)(edgeOut.manager.beatBytes*8-1, 0))
repeat := split(edgeIn, cated, edgeOut, out, sourceMap)
} else {
// merge input to output
merge(edgeIn, in, edgeOut, out)
}
}
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
// If the master is narrower than the slave, the D channel must be narrowed.
// This is tricky, because the D channel has no address data.
// Thus, you don't know which part of a sub-beat transfer to extract.
// To fix this, we record the relevant address bits for all sources.
// The assumption is that this sort of situation happens only where
// you connect a narrow master to the system bus, so there are few sources.
def sourceMap(source_bits: UInt) = {
val source = if (edgeIn.client.endSourceId == 1) 0.U(0.W) else source_bits
require (edgeOut.manager.beatBytes > edgeIn.manager.beatBytes)
val keepBits = log2Ceil(edgeOut.manager.beatBytes)
val dropBits = log2Ceil(edgeIn.manager.beatBytes)
val sources = Reg(Vec(edgeIn.client.endSourceId, UInt((keepBits-dropBits).W)))
val a_sel = in.a.bits.address(keepBits-1, dropBits)
when (in.a.fire) {
if (edgeIn.client.endSourceId == 1) { // avoid extraction-index-width warning
sources(0) := a_sel
} else {
sources(in.a.bits.source) := a_sel
}
}
// depopulate unused source registers:
edgeIn.client.unusedSources.foreach { id => sources(id) := 0.U }
val bypass = in.a.valid && in.a.bits.source === source
if (edgeIn.manager.minLatency > 0) sources(source)
else Mux(bypass, a_sel, sources(source))
}
splice(edgeIn, in.a, edgeOut, out.a, sourceMap)
splice(edgeOut, out.d, edgeIn, in.d, sourceMap)
if (edgeOut.manager.anySupportAcquireB && edgeIn.client.anySupportProbe) {
splice(edgeOut, out.b, edgeIn, in.b, sourceMap)
splice(edgeIn, in.c, edgeOut, out.c, sourceMap)
out.e.valid := in.e.valid
out.e.bits := in.e.bits
in.e.ready := out.e.ready
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLWidthWidget
{
def apply(innerBeatBytes: Int)(implicit p: Parameters): TLNode =
{
val widget = LazyModule(new TLWidthWidget(innerBeatBytes))
widget.node
}
def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper.beatBytes)
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMWidthWidget(first: Int, second: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("WidthWidget"))
val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff)))
(ram.node
:= TLDelayer(0.1)
:= TLFragmenter(4, 256)
:= TLWidthWidget(second)
:= TLWidthWidget(first)
:= TLDelayer(0.1)
:= model.node
:= fuzz.node)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMWidthWidgetTest(little: Int, big: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMWidthWidget(little,big,txns)).module)
dut.io.start := DontCare
io.finished := dut.io.finished
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
* its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
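// Worked example (editor's illustration, not in the original source) of the
// contains(x: UInt) comparison above, for IdRange(4, 12):
//   start = 4 = 0b0100, end - 1 = 11 = 0b1011
//   largestDeltaBit   = log2Floor(0b0100 ^ 0b1011) = 3
//   smallestCommonBit = 4, uncommonMask = 0b1111
// so the generated hardware checks (x >> 4) === 0.U together with 4.U <= x(3,0) and
// x(3,0) <= 11.U, which is exactly 4 <= x < 12 without a full-width subtraction.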
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
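// Worked example (editor's illustration, not in the original source):
//   TransferSizes(1, 2) mincover TransferSizes(8, 8) == TransferSizes(1, 8)
// which admits size 4 even though neither term contains it -- hence the note above that
// mincover is not a union. By contrast,
//   TransferSizes(1, 8) intersect TransferSizes(4, 16) == TransferSizes(4, 8).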
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
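// Worked example (editor's illustration, not in the original source) for
// AddressSet(0x1000, 0xf0f) from the header comment above:
//   contains(0x1104) is true, since (0x1104 ^ 0x1000) & ~0xf0f == 0
//   contains(0x1010) is false, since bit 4 differs but is not covered by the mask
//   alignment == 0x10, and because alignment != mask + 1 the set is not contiguous.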
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: the source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameter propagation. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
  /** Ensure that the same node is not visited twice in resolving `:*=`, etc. operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
    * connections which need to be resolved, in some way, to determine how many actual edges they correspond to. We
    * also need to build up the ranges of edges which correspond to each binding operator, so that we can apply the
    * correct edge parameters and later build up correct bundle connections.
    *
    *   - [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort
    *     (binding operator).
    *   - [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
    *     (binding operator).
    *   - [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= bar`.
    *   - [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`.
    */
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
      // resolveStar depends on the node subclass to implement the resolution algorithm.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
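  // Worked example (illustrative): if the outward bindings of a node resolve to 1, 3 and 2
  // edges respectively, then
  //   oSum                     == Seq(0, 1, 4, 6)
  //   oSum.init.zip(oSum.tail) == Seq((0, 1), (1, 4), (4, 6))
  // so binding 0 owns edge 0, binding 1 owns edges [1, 4), and binding 2 owns edges [4, 6).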
  /** Sequence of outward ports.
    *
    * This should be called after all star bindings are resolved.
    *
    * Each element is: `j` Port index of this binding in the other node's [[iPortMapping]] on the other side of the
    * binding. `n` Instance of the inward node on the other side. `p` View of [[Parameters]] where this connection was
    * made. `s` Source info where this connection was made in the source code.
    */
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  /** Sequence of inward ports.
    *
    * This should be called after all star bindings are resolved.
    *
    * Each element is: `j` Port index of this binding in the other node's [[oPortMapping]] on the other side of the
    * binding. `n` Instance of the outward node on the other side. `p` View of [[Parameters]] where this connection
    * was made. `s` [[SourceInfo]] where this connection was made in the source code.
    */
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
      // query the port index range of this binding on the other side of the node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
    * If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: DontCare unconnected forwarded diplomatic signals for compatibility reasons.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: DontCare unconnected forwarded diplomatic signals for compatibility reasons.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
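  // Usage sketch (illustrative; `node` is an assumed diplomatic node of this LazyModule):
  // these accessors are normally consumed from inside the LazyModuleImp body, e.g.
  //
  //   lazy val module = new LazyModuleImp(this) {
  //     node.out.foreach { case (bundle, edge) =>
  //       // drive `bundle` according to the parameters negotiated in `edge`
  //     }
  //   }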
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
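    // Note the inversion below: what this node records as BIND_STAR (e.g. for `x :*= y`) the
    // other node records as BIND_QUERY, and vice versa, so each side knows whether it must
    // query the other for its resolved star cardinality.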
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
  /** Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File SystemBus.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.subsystem
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.devices.tilelink.{
BuiltInDevices, BuiltInZeroDeviceParams, BuiltInErrorDeviceParams, HasBuiltInDeviceParams
}
import freechips.rocketchip.tilelink.{
TLArbiter, RegionReplicator, ReplicatedRegion, HasTLBusParams, TLBusWrapper,
TLBusWrapperInstantiationLike, TLXbar, TLEdge, TLInwardNode, TLOutwardNode,
TLFIFOFixer, TLTempNode
}
import freechips.rocketchip.util.Location
case class SystemBusParams(
beatBytes: Int,
blockBytes: Int,
policy: TLArbiter.Policy = TLArbiter.roundRobin,
dtsFrequency: Option[BigInt] = None,
zeroDevice: Option[BuiltInZeroDeviceParams] = None,
errorDevice: Option[BuiltInErrorDeviceParams] = None,
replication: Option[ReplicatedRegion] = None)
extends HasTLBusParams
with HasBuiltInDeviceParams
with TLBusWrapperInstantiationLike
{
def instantiate(context: HasTileLinkLocations, loc: Location[TLBusWrapper])(implicit p: Parameters): SystemBus = {
val sbus = LazyModule(new SystemBus(this, loc.name))
sbus.suggestName(loc.name)
context.tlBusWrapperLocationMap += (loc -> sbus)
sbus
}
}
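// Usage sketch (illustrative): these parameters are normally bound to a bus location key in a
// Config fragment, for example something along the lines of
//
//   class WithDefaultSbus extends Config((site, here, up) => {
//     case SystemBusKey => SystemBusParams(beatBytes = 8, blockBytes = 64)
//   })
//
// where `SystemBusKey` is the subsystem's Field for the system bus parameters.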
class SystemBus(params: SystemBusParams, name: String = "system_bus")(implicit p: Parameters)
extends TLBusWrapper(params, name)
{
private val replicator = params.replication.map(r => LazyModule(new RegionReplicator(r)))
val prefixNode = replicator.map { r =>
r.prefix := addressPrefixNexusNode
addressPrefixNexusNode
}
private val system_bus_xbar = LazyModule(new TLXbar(policy = params.policy, nameSuffix = Some(name)))
val inwardNode: TLInwardNode = system_bus_xbar.node :=* TLFIFOFixer(TLFIFOFixer.allVolatile) :=* replicator.map(_.node).getOrElse(TLTempNode())
val outwardNode: TLOutwardNode = system_bus_xbar.node
def busView: TLEdge = system_bus_xbar.node.edges.in.head
val builtInDevices: BuiltInDevices = BuiltInDevices.attach(params, outwardNode)
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
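  // Usage sketch (illustrative; `out`, `edge`, `addr` and `want_get` are assumed to exist in
  // the client's LazyModuleImp): build the request with the helper, then drive channel A:
  //
  //   val (legal, get) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
  //   out.a.valid := want_get && legal
  //   out.a.bits  := get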
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
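  // Usage sketch (illustrative; `in` and `edge` are an assumed manager-side port and edge): a
  // simple manager that only accepts Puts can acknowledge every A-channel request directly on D:
  //
  //   in.a.ready := in.d.ready
  //   in.d.valid := in.a.valid
  //   in.d.bits  := edge.AccessAck(in.a.bits)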
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
File LazyScope.scala:
package org.chipsalliance.diplomacy.lazymodule
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.ValName
/** Allows dynamic creation of [[Module]] hierarchy and "shoving" logic into a [[LazyModule]]. */
trait LazyScope {
this: LazyModule =>
override def toString: String = s"LazyScope named $name"
/** Evaluate `body` in the current [[LazyModule.scope]] */
def apply[T](body: => T): T = {
    // Preserve the previous value of [[LazyModule.scope]], because calling the [[apply]]
    // function will alter [[LazyModule.scope]].
val saved = LazyModule.scope
// [[LazyModule.scope]] stack push.
LazyModule.scope = Some(this)
// Evaluate [[body]] in the current `scope`, saving the result to [[out]].
val out = body
// Check that the `scope` after evaluating `body` is the same as when we started.
require(LazyModule.scope.isDefined, s"LazyScope $name tried to exit, but scope was empty!")
require(
LazyModule.scope.get eq this,
s"LazyScope $name exited before LazyModule ${LazyModule.scope.get.name} was closed"
)
// [[LazyModule.scope]] stack pop.
LazyModule.scope = saved
out
}
}
/** Used to automatically create a level of module hierarchy (a [[SimpleLazyModule]]) within which [[LazyModule]]s can
* be instantiated and connected.
*
* It will instantiate a [[SimpleLazyModule]] to manage evaluation of `body` and evaluate `body` code snippets in this
* scope.
*/
object LazyScope {
/** Create a [[LazyScope]] with an implicit instance name.
*
* @param body
* code executed within the generated [[SimpleLazyModule]].
* @param valName
* instance name of generated [[SimpleLazyModule]].
* @param p
* [[Parameters]] propagated to [[SimpleLazyModule]].
*/
def apply[T](
body: => T
)(
implicit valName: ValName,
p: Parameters
): T = {
apply(valName.value, "SimpleLazyModule", None)(body)(p)
}
/** Create a [[LazyScope]] with an explicitly defined instance name.
*
* @param name
* instance name of generated [[SimpleLazyModule]].
* @param body
* code executed within the generated `SimpleLazyModule`
* @param p
* [[Parameters]] propagated to [[SimpleLazyModule]].
*/
def apply[T](
name: String
)(body: => T
)(
implicit p: Parameters
): T = {
apply(name, "SimpleLazyModule", None)(body)(p)
}
/** Create a [[LazyScope]] with an explicit instance and class name, and control inlining.
*
* @param name
* instance name of generated [[SimpleLazyModule]].
* @param desiredModuleName
* class name of generated [[SimpleLazyModule]].
* @param overrideInlining
* tell FIRRTL that this [[SimpleLazyModule]]'s module should be inlined.
* @param body
* code executed within the generated `SimpleLazyModule`
* @param p
* [[Parameters]] propagated to [[SimpleLazyModule]].
*/
def apply[T](
name: String,
desiredModuleName: String,
overrideInlining: Option[Boolean] = None
)(body: => T
)(
implicit p: Parameters
): T = {
val scope = LazyModule(new SimpleLazyModule with LazyScope {
override lazy val desiredName = desiredModuleName
override def shouldBeInlined = overrideInlining.getOrElse(super.shouldBeInlined)
}).suggestName(name)
scope {
body
}
}
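  // Usage sketch (illustrative; the wrapped modules are assumptions): a named scope gathers
  // several LazyModules under one level of generated hierarchy, e.g.
  //
  //   LazyScope("my_domain") {
  //     val xbar = LazyModule(new TLXbar)
  //     // ... instantiate and connect more LazyModules here ...
  //   }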
/** Create a [[LazyScope]] to temporarily group children for some reason, but tell Firrtl to inline it.
*
* For example, we might want to control a set of children's clocks but then not keep the parent wrapper.
*
* @param body
* code executed within the generated `SimpleLazyModule`
* @param p
* [[Parameters]] propagated to [[SimpleLazyModule]].
*/
def inline[T](
body: => T
)(
implicit p: Parameters
): T = {
apply("noname", "ShouldBeInlined", Some(false))(body)(p)
}
}
| module SystemBus( // @[ClockDomain.scala:14:9]
output auto_coupler_from_rockettile_tl_master_clock_xing_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_coupler_from_rockettile_tl_master_clock_xing_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_coupler_from_rockettile_tl_master_clock_xing_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_coupler_from_rockettile_tl_master_clock_xing_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_coupler_from_bus_named_fbus_bus_xing_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_coupler_from_bus_named_fbus_bus_xing_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_coupler_from_bus_named_fbus_bus_xing_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_coupler_from_bus_named_fbus_bus_xing_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_bus_named_cbus_bus_xing_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_bus_named_cbus_bus_xing_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_bus_named_cbus_bus_xing_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_bus_named_cbus_bus_xing_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_2_clock, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_2_reset, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_1_clock, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_1_reset, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_0_clock, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_0_reset, // @[LazyModuleImp.scala:107:25]
input auto_sbus_clock_groups_in_member_sbus_0_clock, // @[LazyModuleImp.scala:107:25]
input auto_sbus_clock_groups_in_member_sbus_0_reset // @[LazyModuleImp.scala:107:25]
);
wire coupler_from_bus_named_fbus_widget_auto_anon_in_d_valid; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_widget_auto_anon_in_d_ready; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire [63:0] coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_data; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_denied; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_sink; // @[WidthWidget.scala:27:9]
wire [4:0] coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_source; // @[WidthWidget.scala:27:9]
wire [3:0] coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_size; // @[WidthWidget.scala:27:9]
wire [1:0] coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_widget_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_widget_auto_anon_in_a_ready; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire [63:0] coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9]
wire [7:0] coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [31:0] coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire [4:0] coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_source; // @[WidthWidget.scala:27:9]
wire [3:0] coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_size; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_auto_anon_out_d_ready; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire [63:0] coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9]
wire [5:0] coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_source; // @[WidthWidget.scala:27:9]
wire [3:0] coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9]
wire [1:0] coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_auto_anon_out_a_valid; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire [63:0] coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_data; // @[WidthWidget.scala:27:9]
wire [7:0] coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [31:0] coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_address; // @[WidthWidget.scala:27:9]
wire [5:0] coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_source; // @[WidthWidget.scala:27:9]
wire [3:0] coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_size; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_auto_widget_anon_in_d_ready; // @[LazyModuleImp.scala:138:7]
wire coupler_to_bus_named_cbus_auto_widget_anon_in_a_valid; // @[LazyModuleImp.scala:138:7]
wire coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_corrupt; // @[LazyModuleImp.scala:138:7]
wire [63:0] coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_data; // @[LazyModuleImp.scala:138:7]
wire [7:0] coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_mask; // @[LazyModuleImp.scala:138:7]
wire [31:0] coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_address; // @[LazyModuleImp.scala:138:7]
wire [5:0] coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_source; // @[LazyModuleImp.scala:138:7]
wire [3:0] coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_size; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_param; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_opcode; // @[LazyModuleImp.scala:138:7]
wire fixer_auto_anon_out_0_d_valid; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_0_d_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire [63:0] fixer_auto_anon_out_0_d_bits_data; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_0_d_bits_denied; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_0_d_bits_sink; // @[FIFOFixer.scala:50:9]
wire [4:0] fixer_auto_anon_out_0_d_bits_source; // @[FIFOFixer.scala:50:9]
wire [3:0] fixer_auto_anon_out_0_d_bits_size; // @[FIFOFixer.scala:50:9]
wire [1:0] fixer_auto_anon_out_0_d_bits_param; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_auto_anon_out_0_d_bits_opcode; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_0_a_ready; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_1_d_valid; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_1_d_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire [63:0] fixer_auto_anon_out_1_d_bits_data; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_1_d_bits_denied; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_1_d_bits_sink; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_1_d_bits_source; // @[FIFOFixer.scala:50:9]
wire [3:0] fixer_auto_anon_out_1_d_bits_size; // @[FIFOFixer.scala:50:9]
wire [1:0] fixer_auto_anon_out_1_d_bits_param; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_auto_anon_out_1_d_bits_opcode; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_1_a_ready; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_0_d_valid; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_0_d_ready; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_0_d_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire [63:0] fixer_auto_anon_in_0_d_bits_data; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_0_d_bits_denied; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_0_d_bits_sink; // @[FIFOFixer.scala:50:9]
wire [4:0] fixer_auto_anon_in_0_d_bits_source; // @[FIFOFixer.scala:50:9]
wire [3:0] fixer_auto_anon_in_0_d_bits_size; // @[FIFOFixer.scala:50:9]
wire [1:0] fixer_auto_anon_in_0_d_bits_param; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_auto_anon_in_0_d_bits_opcode; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_0_a_valid; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_0_a_ready; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_0_a_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire [63:0] fixer_auto_anon_in_0_a_bits_data; // @[FIFOFixer.scala:50:9]
wire [7:0] fixer_auto_anon_in_0_a_bits_mask; // @[FIFOFixer.scala:50:9]
wire [31:0] fixer_auto_anon_in_0_a_bits_address; // @[FIFOFixer.scala:50:9]
wire [4:0] fixer_auto_anon_in_0_a_bits_source; // @[FIFOFixer.scala:50:9]
wire [3:0] fixer_auto_anon_in_0_a_bits_size; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_auto_anon_in_0_a_bits_param; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_auto_anon_in_0_a_bits_opcode; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_1_d_valid; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_1_d_ready; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_1_d_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire [63:0] fixer_auto_anon_in_1_d_bits_data; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_1_d_bits_denied; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_1_d_bits_sink; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_1_d_bits_source; // @[FIFOFixer.scala:50:9]
wire [3:0] fixer_auto_anon_in_1_d_bits_size; // @[FIFOFixer.scala:50:9]
wire [1:0] fixer_auto_anon_in_1_d_bits_param; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_auto_anon_in_1_d_bits_opcode; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_1_a_valid; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_1_a_ready; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_1_a_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire [63:0] fixer_auto_anon_in_1_a_bits_data; // @[FIFOFixer.scala:50:9]
wire [7:0] fixer_auto_anon_in_1_a_bits_mask; // @[FIFOFixer.scala:50:9]
wire [31:0] fixer_auto_anon_in_1_a_bits_address; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_in_1_a_bits_source; // @[FIFOFixer.scala:50:9]
wire [3:0] fixer_auto_anon_in_1_a_bits_size; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_auto_anon_in_1_a_bits_param; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_auto_anon_in_1_a_bits_opcode; // @[FIFOFixer.scala:50:9]
wire sbus_clock_groups_auto_out_member_sbus_0_reset; // @[ClockGroup.scala:53:9]
wire sbus_clock_groups_auto_out_member_sbus_0_clock; // @[ClockGroup.scala:53:9]
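  // ClockDomain input aliases: each auto_* input port is mirrored onto a
  // _0-suffixed wire before being fanned out to the coupler bundles below.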
wire auto_coupler_from_rockettile_tl_master_clock_xing_in_a_valid_0 = auto_coupler_from_rockettile_tl_master_clock_xing_in_a_valid; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_opcode_0 = auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_opcode; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_param_0 = auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_param; // @[ClockDomain.scala:14:9]
wire [3:0] auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_size_0 = auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_size; // @[ClockDomain.scala:14:9]
wire auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_source_0 = auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_source; // @[ClockDomain.scala:14:9]
wire [31:0] auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_address_0 = auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_address; // @[ClockDomain.scala:14:9]
wire [7:0] auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_mask_0 = auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_mask; // @[ClockDomain.scala:14:9]
wire [63:0] auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_data_0 = auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_data; // @[ClockDomain.scala:14:9]
wire auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_corrupt_0 = auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_corrupt; // @[ClockDomain.scala:14:9]
wire auto_coupler_from_rockettile_tl_master_clock_xing_in_d_ready_0 = auto_coupler_from_rockettile_tl_master_clock_xing_in_d_ready; // @[ClockDomain.scala:14:9]
wire auto_coupler_from_bus_named_fbus_bus_xing_in_a_valid_0 = auto_coupler_from_bus_named_fbus_bus_xing_in_a_valid; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_opcode_0 = auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_opcode; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_param_0 = auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_param; // @[ClockDomain.scala:14:9]
wire [3:0] auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_size_0 = auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_size; // @[ClockDomain.scala:14:9]
wire [4:0] auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_source_0 = auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_source; // @[ClockDomain.scala:14:9]
wire [31:0] auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_address_0 = auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_address; // @[ClockDomain.scala:14:9]
wire [7:0] auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_mask_0 = auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_mask; // @[ClockDomain.scala:14:9]
wire [63:0] auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_data_0 = auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_data; // @[ClockDomain.scala:14:9]
wire auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_corrupt_0 = auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_corrupt; // @[ClockDomain.scala:14:9]
wire auto_coupler_from_bus_named_fbus_bus_xing_in_d_ready_0 = auto_coupler_from_bus_named_fbus_bus_xing_in_d_ready; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_bus_named_cbus_bus_xing_out_a_ready_0 = auto_coupler_to_bus_named_cbus_bus_xing_out_a_ready; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_bus_named_cbus_bus_xing_out_d_valid_0 = auto_coupler_to_bus_named_cbus_bus_xing_out_d_valid; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_opcode_0 = auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_opcode; // @[ClockDomain.scala:14:9]
wire [1:0] auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_param_0 = auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_param; // @[ClockDomain.scala:14:9]
wire [3:0] auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_size_0 = auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_size; // @[ClockDomain.scala:14:9]
wire [5:0] auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_source_0 = auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_source; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_sink_0 = auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_sink; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_denied_0 = auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_denied; // @[ClockDomain.scala:14:9]
wire [63:0] auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_data_0 = auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_data; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_corrupt_0 = auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_corrupt; // @[ClockDomain.scala:14:9]
wire auto_sbus_clock_groups_in_member_sbus_0_clock_0 = auto_sbus_clock_groups_in_member_sbus_0_clock; // @[ClockDomain.scala:14:9]
wire auto_sbus_clock_groups_in_member_sbus_0_reset_0 = auto_sbus_clock_groups_in_member_sbus_0_reset; // @[ClockDomain.scala:14:9]
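  // Constant-folded nets: always-true FIFO qualification terms, unused child
  // clock/reset wires, and address-match intermediates that elaboration
  // reduced to literal constants.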
wire [1:0] fixer__allIDs_FIFOed_T_1 = 2'h3; // @[FIFOFixer.scala:127:48]
wire fixer__a_notFIFO_T_4 = 1'h1; // @[Parameters.scala:137:59]
wire fixer__a_id_T_4 = 1'h1; // @[Parameters.scala:137:59]
wire fixer__flight_T = 1'h1; // @[FIFOFixer.scala:80:65]
wire fixer__anonOut_a_valid_T = 1'h1; // @[FIFOFixer.scala:95:50]
wire fixer__anonOut_a_valid_T_1 = 1'h1; // @[FIFOFixer.scala:95:47]
wire fixer__anonIn_a_ready_T = 1'h1; // @[FIFOFixer.scala:96:50]
wire fixer__anonIn_a_ready_T_1 = 1'h1; // @[FIFOFixer.scala:96:47]
wire fixer__a_notFIFO_T_9 = 1'h1; // @[Parameters.scala:137:59]
wire fixer__a_id_T_9 = 1'h1; // @[Parameters.scala:137:59]
wire fixer__flight_T_1 = 1'h1; // @[FIFOFixer.scala:80:65]
wire fixer__anonOut_a_valid_T_3 = 1'h1; // @[FIFOFixer.scala:95:50]
wire fixer__anonOut_a_valid_T_4 = 1'h1; // @[FIFOFixer.scala:95:47]
wire fixer__anonIn_a_ready_T_3 = 1'h1; // @[FIFOFixer.scala:96:50]
wire fixer__anonIn_a_ready_T_4 = 1'h1; // @[FIFOFixer.scala:96:47]
wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire sbus_clock_groups_childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire sbus_clock_groups_childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire sbus_clock_groups__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire clockGroup_childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire clockGroup_childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire clockGroup__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire broadcast_childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire broadcast_childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire broadcast__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire fixer_a_noDomain = 1'h0; // @[FIFOFixer.scala:63:29]
wire fixer__flight_WIRE_0 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_1 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_2 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_3 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_4 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_5 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_6 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_7 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_8 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_9 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_10 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_11 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_12 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_13 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_14 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_15 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_16 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer_a_noDomain_1 = 1'h0; // @[FIFOFixer.scala:63:29]
wire fixer__flight_WIRE_1_0 = 1'h0; // @[FIFOFixer.scala:79:35]
wire fixer__flight_WIRE_1_1 = 1'h0; // @[FIFOFixer.scala:79:35]
wire [16:0] fixer__allIDs_FIFOed_T = 17'h1FFFF; // @[FIFOFixer.scala:127:48]
wire [32:0] fixer__a_notFIFO_T_2 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] fixer__a_notFIFO_T_3 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] fixer__a_id_T_2 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] fixer__a_id_T_3 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] fixer__a_notFIFO_T_7 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] fixer__a_notFIFO_T_8 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] fixer__a_id_T_7 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] fixer__a_id_T_8 = 33'h0; // @[Parameters.scala:137:46]
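  // Coupler input forwarding: the rockettile, fbus, and cbus coupler bundles
  // below pick up the aliased _0 inputs; the response-direction wires (D
  // channel, A ready) are declared here and driven further down in the module.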
wire coupler_from_rockettile_auto_tl_master_clock_xing_in_a_ready; // @[LazyModuleImp.scala:138:7]
wire coupler_from_rockettile_auto_tl_master_clock_xing_in_a_valid = auto_coupler_from_rockettile_tl_master_clock_xing_in_a_valid_0; // @[ClockDomain.scala:14:9]
wire [2:0] coupler_from_rockettile_auto_tl_master_clock_xing_in_a_bits_opcode = auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_opcode_0; // @[ClockDomain.scala:14:9]
wire [2:0] coupler_from_rockettile_auto_tl_master_clock_xing_in_a_bits_param = auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_param_0; // @[ClockDomain.scala:14:9]
wire [3:0] coupler_from_rockettile_auto_tl_master_clock_xing_in_a_bits_size = auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_size_0; // @[ClockDomain.scala:14:9]
wire coupler_from_rockettile_auto_tl_master_clock_xing_in_a_bits_source = auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_source_0; // @[ClockDomain.scala:14:9]
wire [31:0] coupler_from_rockettile_auto_tl_master_clock_xing_in_a_bits_address = auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_address_0; // @[ClockDomain.scala:14:9]
wire [7:0] coupler_from_rockettile_auto_tl_master_clock_xing_in_a_bits_mask = auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_mask_0; // @[ClockDomain.scala:14:9]
wire [63:0] coupler_from_rockettile_auto_tl_master_clock_xing_in_a_bits_data = auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_data_0; // @[ClockDomain.scala:14:9]
wire coupler_from_rockettile_auto_tl_master_clock_xing_in_a_bits_corrupt = auto_coupler_from_rockettile_tl_master_clock_xing_in_a_bits_corrupt_0; // @[ClockDomain.scala:14:9]
wire coupler_from_rockettile_auto_tl_master_clock_xing_in_d_ready = auto_coupler_from_rockettile_tl_master_clock_xing_in_d_ready_0; // @[ClockDomain.scala:14:9]
wire coupler_from_rockettile_auto_tl_master_clock_xing_in_d_valid; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_opcode; // @[LazyModuleImp.scala:138:7]
wire [1:0] coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_param; // @[LazyModuleImp.scala:138:7]
wire [3:0] coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_size; // @[LazyModuleImp.scala:138:7]
wire coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_source; // @[LazyModuleImp.scala:138:7]
wire coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_sink; // @[LazyModuleImp.scala:138:7]
wire coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_denied; // @[LazyModuleImp.scala:138:7]
wire [63:0] coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_data; // @[LazyModuleImp.scala:138:7]
wire coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_corrupt; // @[LazyModuleImp.scala:138:7]
wire coupler_from_bus_named_fbus_auto_bus_xing_in_a_ready; // @[LazyModuleImp.scala:138:7]
wire coupler_from_bus_named_fbus_auto_bus_xing_in_a_valid = auto_coupler_from_bus_named_fbus_bus_xing_in_a_valid_0; // @[ClockDomain.scala:14:9]
wire [2:0] coupler_from_bus_named_fbus_auto_bus_xing_in_a_bits_opcode = auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_opcode_0; // @[ClockDomain.scala:14:9]
wire [2:0] coupler_from_bus_named_fbus_auto_bus_xing_in_a_bits_param = auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_param_0; // @[ClockDomain.scala:14:9]
wire [3:0] coupler_from_bus_named_fbus_auto_bus_xing_in_a_bits_size = auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_size_0; // @[ClockDomain.scala:14:9]
wire [4:0] coupler_from_bus_named_fbus_auto_bus_xing_in_a_bits_source = auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_source_0; // @[ClockDomain.scala:14:9]
wire [31:0] coupler_from_bus_named_fbus_auto_bus_xing_in_a_bits_address = auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_address_0; // @[ClockDomain.scala:14:9]
wire [7:0] coupler_from_bus_named_fbus_auto_bus_xing_in_a_bits_mask = auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_mask_0; // @[ClockDomain.scala:14:9]
wire [63:0] coupler_from_bus_named_fbus_auto_bus_xing_in_a_bits_data = auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_data_0; // @[ClockDomain.scala:14:9]
wire coupler_from_bus_named_fbus_auto_bus_xing_in_a_bits_corrupt = auto_coupler_from_bus_named_fbus_bus_xing_in_a_bits_corrupt_0; // @[ClockDomain.scala:14:9]
wire coupler_from_bus_named_fbus_auto_bus_xing_in_d_ready = auto_coupler_from_bus_named_fbus_bus_xing_in_d_ready_0; // @[ClockDomain.scala:14:9]
wire coupler_from_bus_named_fbus_auto_bus_xing_in_d_valid; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_opcode; // @[LazyModuleImp.scala:138:7]
wire [1:0] coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_param; // @[LazyModuleImp.scala:138:7]
wire [3:0] coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_size; // @[LazyModuleImp.scala:138:7]
wire [4:0] coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_source; // @[LazyModuleImp.scala:138:7]
wire coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_sink; // @[LazyModuleImp.scala:138:7]
wire coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_denied; // @[LazyModuleImp.scala:138:7]
wire [63:0] coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_data; // @[LazyModuleImp.scala:138:7]
wire coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_corrupt; // @[LazyModuleImp.scala:138:7]
wire coupler_to_bus_named_cbus_auto_bus_xing_out_a_ready = auto_coupler_to_bus_named_cbus_bus_xing_out_a_ready_0; // @[ClockDomain.scala:14:9]
wire coupler_to_bus_named_cbus_auto_bus_xing_out_a_valid; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_opcode; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_param; // @[LazyModuleImp.scala:138:7]
wire [3:0] coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_size; // @[LazyModuleImp.scala:138:7]
wire [5:0] coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_source; // @[LazyModuleImp.scala:138:7]
wire [31:0] coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_address; // @[LazyModuleImp.scala:138:7]
wire [7:0] coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_mask; // @[LazyModuleImp.scala:138:7]
wire [63:0] coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_data; // @[LazyModuleImp.scala:138:7]
wire coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_corrupt; // @[LazyModuleImp.scala:138:7]
wire coupler_to_bus_named_cbus_auto_bus_xing_out_d_ready; // @[LazyModuleImp.scala:138:7]
wire coupler_to_bus_named_cbus_auto_bus_xing_out_d_valid = auto_coupler_to_bus_named_cbus_bus_xing_out_d_valid_0; // @[ClockDomain.scala:14:9]
wire [2:0] coupler_to_bus_named_cbus_auto_bus_xing_out_d_bits_opcode = auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_opcode_0; // @[ClockDomain.scala:14:9]
wire [1:0] coupler_to_bus_named_cbus_auto_bus_xing_out_d_bits_param = auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_param_0; // @[ClockDomain.scala:14:9]
wire [3:0] coupler_to_bus_named_cbus_auto_bus_xing_out_d_bits_size = auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_size_0; // @[ClockDomain.scala:14:9]
wire [5:0] coupler_to_bus_named_cbus_auto_bus_xing_out_d_bits_source = auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_source_0; // @[ClockDomain.scala:14:9]
wire coupler_to_bus_named_cbus_auto_bus_xing_out_d_bits_sink = auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_sink_0; // @[ClockDomain.scala:14:9]
wire coupler_to_bus_named_cbus_auto_bus_xing_out_d_bits_denied = auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_denied_0; // @[ClockDomain.scala:14:9]
wire [63:0] coupler_to_bus_named_cbus_auto_bus_xing_out_d_bits_data = auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_data_0; // @[ClockDomain.scala:14:9]
wire coupler_to_bus_named_cbus_auto_bus_xing_out_d_bits_corrupt = auto_coupler_to_bus_named_cbus_bus_xing_out_d_bits_corrupt_0; // @[ClockDomain.scala:14:9]
wire sbus_clock_groups_auto_in_member_sbus_0_clock = auto_sbus_clock_groups_in_member_sbus_0_clock_0; // @[ClockGroup.scala:53:9]
wire sbus_clock_groups_auto_in_member_sbus_0_reset = auto_sbus_clock_groups_in_member_sbus_0_reset_0; // @[ClockGroup.scala:53:9]
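  // Module output staging: the auto_*_0 nets declared next hold the values
  // that are later assigned onto the corresponding output ports.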
wire auto_coupler_from_rockettile_tl_master_clock_xing_in_a_ready_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_opcode_0; // @[ClockDomain.scala:14:9]
wire [1:0] auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_param_0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_size_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_source_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_sink_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_denied_0; // @[ClockDomain.scala:14:9]
wire [63:0] auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_data_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_corrupt_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_from_rockettile_tl_master_clock_xing_in_d_valid_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_from_bus_named_fbus_bus_xing_in_a_ready_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_opcode_0; // @[ClockDomain.scala:14:9]
wire [1:0] auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_param_0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_size_0; // @[ClockDomain.scala:14:9]
wire [4:0] auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_source_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_sink_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_denied_0; // @[ClockDomain.scala:14:9]
wire [63:0] auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_data_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_corrupt_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_from_bus_named_fbus_bus_xing_in_d_valid_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_opcode_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_param_0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_size_0; // @[ClockDomain.scala:14:9]
wire [5:0] auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_source_0; // @[ClockDomain.scala:14:9]
wire [31:0] auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_address_0; // @[ClockDomain.scala:14:9]
wire [7:0] auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_mask_0; // @[ClockDomain.scala:14:9]
wire [63:0] auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_data_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_corrupt_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_bus_named_cbus_bus_xing_out_a_valid_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_bus_named_cbus_bus_xing_out_d_ready_0; // @[ClockDomain.scala:14:9]
wire auto_fixedClockNode_anon_out_2_clock_0; // @[ClockDomain.scala:14:9]
wire auto_fixedClockNode_anon_out_2_reset_0; // @[ClockDomain.scala:14:9]
wire auto_fixedClockNode_anon_out_1_clock_0; // @[ClockDomain.scala:14:9]
wire auto_fixedClockNode_anon_out_1_reset_0; // @[ClockDomain.scala:14:9]
wire auto_fixedClockNode_anon_out_0_clock_0; // @[ClockDomain.scala:14:9]
wire auto_fixedClockNode_anon_out_0_reset_0; // @[ClockDomain.scala:14:9]
wire clockSinkNodeIn_clock; // @[MixedNode.scala:551:17]
wire clockSinkNodeIn_reset; // @[MixedNode.scala:551:17]
wire childClock; // @[LazyModuleImp.scala:155:31]
wire childReset; // @[LazyModuleImp.scala:158:31]
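  // sbus clock group plumbing: the incoming member_sbus_0 clock/reset passes
  // straight through sbus_clock_groups and clockGroup via their diplomatic
  // nodeIn/nodeOut endpoints.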
wire sbus_clock_groups_nodeIn_member_sbus_0_clock = sbus_clock_groups_auto_in_member_sbus_0_clock; // @[ClockGroup.scala:53:9]
wire sbus_clock_groups_nodeOut_member_sbus_0_clock; // @[MixedNode.scala:542:17]
wire sbus_clock_groups_nodeIn_member_sbus_0_reset = sbus_clock_groups_auto_in_member_sbus_0_reset; // @[ClockGroup.scala:53:9]
wire sbus_clock_groups_nodeOut_member_sbus_0_reset; // @[MixedNode.scala:542:17]
wire clockGroup_auto_in_member_sbus_0_clock = sbus_clock_groups_auto_out_member_sbus_0_clock; // @[ClockGroup.scala:24:9, :53:9]
wire clockGroup_auto_in_member_sbus_0_reset = sbus_clock_groups_auto_out_member_sbus_0_reset; // @[ClockGroup.scala:24:9, :53:9]
assign sbus_clock_groups_auto_out_member_sbus_0_clock = sbus_clock_groups_nodeOut_member_sbus_0_clock; // @[ClockGroup.scala:53:9]
assign sbus_clock_groups_auto_out_member_sbus_0_reset = sbus_clock_groups_nodeOut_member_sbus_0_reset; // @[ClockGroup.scala:53:9]
assign sbus_clock_groups_nodeOut_member_sbus_0_clock = sbus_clock_groups_nodeIn_member_sbus_0_clock; // @[MixedNode.scala:542:17, :551:17]
assign sbus_clock_groups_nodeOut_member_sbus_0_reset = sbus_clock_groups_nodeIn_member_sbus_0_reset; // @[MixedNode.scala:542:17, :551:17]
wire clockGroup_nodeIn_member_sbus_0_clock = clockGroup_auto_in_member_sbus_0_clock; // @[ClockGroup.scala:24:9]
wire clockGroup_nodeOut_clock; // @[MixedNode.scala:542:17]
wire clockGroup_nodeIn_member_sbus_0_reset = clockGroup_auto_in_member_sbus_0_reset; // @[ClockGroup.scala:24:9]
wire clockGroup_nodeOut_reset; // @[MixedNode.scala:542:17]
wire clockGroup_auto_out_clock; // @[ClockGroup.scala:24:9]
wire clockGroup_auto_out_reset; // @[ClockGroup.scala:24:9]
assign clockGroup_auto_out_clock = clockGroup_nodeOut_clock; // @[ClockGroup.scala:24:9]
assign clockGroup_auto_out_reset = clockGroup_nodeOut_reset; // @[ClockGroup.scala:24:9]
assign clockGroup_nodeOut_clock = clockGroup_nodeIn_member_sbus_0_clock; // @[MixedNode.scala:542:17, :551:17]
assign clockGroup_nodeOut_reset = clockGroup_nodeIn_member_sbus_0_reset; // @[MixedNode.scala:542:17, :551:17]
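  // TLFIFOFixer input fan-out: anon_in_1 (the "x1" port) faces the rockettile
  // TL master and anon_in_0 faces the fbus coupler's width widget. A-channel
  // requests pass through combinationally here (the FIFO stall terms folded to
  // constant 1 in this configuration), and D-channel responses are forwarded
  // back unchanged.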
wire fixer_x1_anonIn_a_ready; // @[MixedNode.scala:551:17]
wire coupler_from_rockettile_auto_tl_out_a_ready = fixer_auto_anon_in_1_a_ready; // @[FIFOFixer.scala:50:9]
wire coupler_from_rockettile_auto_tl_out_a_valid; // @[LazyModuleImp.scala:138:7]
wire fixer_x1_anonIn_a_valid = fixer_auto_anon_in_1_a_valid; // @[FIFOFixer.scala:50:9]
wire [2:0] coupler_from_rockettile_auto_tl_out_a_bits_opcode; // @[LazyModuleImp.scala:138:7]
wire [2:0] fixer_x1_anonIn_a_bits_opcode = fixer_auto_anon_in_1_a_bits_opcode; // @[FIFOFixer.scala:50:9]
wire [2:0] coupler_from_rockettile_auto_tl_out_a_bits_param; // @[LazyModuleImp.scala:138:7]
wire [2:0] fixer_x1_anonIn_a_bits_param = fixer_auto_anon_in_1_a_bits_param; // @[FIFOFixer.scala:50:9]
wire [3:0] coupler_from_rockettile_auto_tl_out_a_bits_size; // @[LazyModuleImp.scala:138:7]
wire [3:0] fixer_x1_anonIn_a_bits_size = fixer_auto_anon_in_1_a_bits_size; // @[FIFOFixer.scala:50:9]
wire coupler_from_rockettile_auto_tl_out_a_bits_source; // @[LazyModuleImp.scala:138:7]
wire fixer_x1_anonIn_a_bits_source = fixer_auto_anon_in_1_a_bits_source; // @[FIFOFixer.scala:50:9]
wire [31:0] coupler_from_rockettile_auto_tl_out_a_bits_address; // @[LazyModuleImp.scala:138:7]
wire [31:0] fixer_x1_anonIn_a_bits_address = fixer_auto_anon_in_1_a_bits_address; // @[FIFOFixer.scala:50:9]
wire [7:0] coupler_from_rockettile_auto_tl_out_a_bits_mask; // @[LazyModuleImp.scala:138:7]
wire [7:0] fixer_x1_anonIn_a_bits_mask = fixer_auto_anon_in_1_a_bits_mask; // @[FIFOFixer.scala:50:9]
wire [63:0] coupler_from_rockettile_auto_tl_out_a_bits_data; // @[LazyModuleImp.scala:138:7]
wire [63:0] fixer_x1_anonIn_a_bits_data = fixer_auto_anon_in_1_a_bits_data; // @[FIFOFixer.scala:50:9]
wire coupler_from_rockettile_auto_tl_out_a_bits_corrupt; // @[LazyModuleImp.scala:138:7]
wire fixer_x1_anonIn_a_bits_corrupt = fixer_auto_anon_in_1_a_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire coupler_from_rockettile_auto_tl_out_d_ready; // @[LazyModuleImp.scala:138:7]
wire fixer_x1_anonIn_d_ready = fixer_auto_anon_in_1_d_ready; // @[FIFOFixer.scala:50:9]
wire fixer_x1_anonIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] fixer_x1_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire coupler_from_rockettile_auto_tl_out_d_valid = fixer_auto_anon_in_1_d_valid; // @[FIFOFixer.scala:50:9]
wire [1:0] fixer_x1_anonIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [2:0] coupler_from_rockettile_auto_tl_out_d_bits_opcode = fixer_auto_anon_in_1_d_bits_opcode; // @[FIFOFixer.scala:50:9]
wire [3:0] fixer_x1_anonIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [1:0] coupler_from_rockettile_auto_tl_out_d_bits_param = fixer_auto_anon_in_1_d_bits_param; // @[FIFOFixer.scala:50:9]
wire fixer_x1_anonIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [3:0] coupler_from_rockettile_auto_tl_out_d_bits_size = fixer_auto_anon_in_1_d_bits_size; // @[FIFOFixer.scala:50:9]
wire fixer_x1_anonIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire coupler_from_rockettile_auto_tl_out_d_bits_source = fixer_auto_anon_in_1_d_bits_source; // @[FIFOFixer.scala:50:9]
wire fixer_x1_anonIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire coupler_from_rockettile_auto_tl_out_d_bits_sink = fixer_auto_anon_in_1_d_bits_sink; // @[FIFOFixer.scala:50:9]
wire [63:0] fixer_x1_anonIn_d_bits_data; // @[MixedNode.scala:551:17]
wire coupler_from_rockettile_auto_tl_out_d_bits_denied = fixer_auto_anon_in_1_d_bits_denied; // @[FIFOFixer.scala:50:9]
wire fixer_x1_anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire [63:0] coupler_from_rockettile_auto_tl_out_d_bits_data = fixer_auto_anon_in_1_d_bits_data; // @[FIFOFixer.scala:50:9]
wire fixer_anonIn_a_ready; // @[MixedNode.scala:551:17]
wire coupler_from_rockettile_auto_tl_out_d_bits_corrupt = fixer_auto_anon_in_1_d_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire coupler_from_bus_named_fbus_auto_widget_anon_out_a_ready = fixer_auto_anon_in_0_a_ready; // @[FIFOFixer.scala:50:9]
wire coupler_from_bus_named_fbus_auto_widget_anon_out_a_valid; // @[LazyModuleImp.scala:138:7]
wire fixer_anonIn_a_valid = fixer_auto_anon_in_0_a_valid; // @[FIFOFixer.scala:50:9]
wire [2:0] coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_opcode; // @[LazyModuleImp.scala:138:7]
wire [2:0] fixer_anonIn_a_bits_opcode = fixer_auto_anon_in_0_a_bits_opcode; // @[FIFOFixer.scala:50:9]
wire [2:0] coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_param; // @[LazyModuleImp.scala:138:7]
wire [2:0] fixer_anonIn_a_bits_param = fixer_auto_anon_in_0_a_bits_param; // @[FIFOFixer.scala:50:9]
wire [3:0] coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_size; // @[LazyModuleImp.scala:138:7]
wire [3:0] fixer_anonIn_a_bits_size = fixer_auto_anon_in_0_a_bits_size; // @[FIFOFixer.scala:50:9]
wire [4:0] coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_source; // @[LazyModuleImp.scala:138:7]
wire [4:0] fixer_anonIn_a_bits_source = fixer_auto_anon_in_0_a_bits_source; // @[FIFOFixer.scala:50:9]
wire [31:0] coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_address; // @[LazyModuleImp.scala:138:7]
wire [31:0] fixer_anonIn_a_bits_address = fixer_auto_anon_in_0_a_bits_address; // @[FIFOFixer.scala:50:9]
wire [7:0] coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_mask; // @[LazyModuleImp.scala:138:7]
wire [7:0] fixer_anonIn_a_bits_mask = fixer_auto_anon_in_0_a_bits_mask; // @[FIFOFixer.scala:50:9]
wire [63:0] coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_data; // @[LazyModuleImp.scala:138:7]
wire [63:0] fixer_anonIn_a_bits_data = fixer_auto_anon_in_0_a_bits_data; // @[FIFOFixer.scala:50:9]
wire coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_corrupt; // @[LazyModuleImp.scala:138:7]
wire fixer_anonIn_a_bits_corrupt = fixer_auto_anon_in_0_a_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire coupler_from_bus_named_fbus_auto_widget_anon_out_d_ready; // @[LazyModuleImp.scala:138:7]
wire fixer_anonIn_d_ready = fixer_auto_anon_in_0_d_ready; // @[FIFOFixer.scala:50:9]
wire fixer_anonIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] fixer_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire coupler_from_bus_named_fbus_auto_widget_anon_out_d_valid = fixer_auto_anon_in_0_d_valid; // @[FIFOFixer.scala:50:9]
wire [1:0] fixer_anonIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [2:0] coupler_from_bus_named_fbus_auto_widget_anon_out_d_bits_opcode = fixer_auto_anon_in_0_d_bits_opcode; // @[FIFOFixer.scala:50:9]
wire [3:0] fixer_anonIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [1:0] coupler_from_bus_named_fbus_auto_widget_anon_out_d_bits_param = fixer_auto_anon_in_0_d_bits_param; // @[FIFOFixer.scala:50:9]
wire [4:0] fixer_anonIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [3:0] coupler_from_bus_named_fbus_auto_widget_anon_out_d_bits_size = fixer_auto_anon_in_0_d_bits_size; // @[FIFOFixer.scala:50:9]
wire fixer_anonIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire [4:0] coupler_from_bus_named_fbus_auto_widget_anon_out_d_bits_source = fixer_auto_anon_in_0_d_bits_source; // @[FIFOFixer.scala:50:9]
wire fixer_anonIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire coupler_from_bus_named_fbus_auto_widget_anon_out_d_bits_sink = fixer_auto_anon_in_0_d_bits_sink; // @[FIFOFixer.scala:50:9]
wire [63:0] fixer_anonIn_d_bits_data; // @[MixedNode.scala:551:17]
wire coupler_from_bus_named_fbus_auto_widget_anon_out_d_bits_denied = fixer_auto_anon_in_0_d_bits_denied; // @[FIFOFixer.scala:50:9]
wire fixer_anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire [63:0] coupler_from_bus_named_fbus_auto_widget_anon_out_d_bits_data = fixer_auto_anon_in_0_d_bits_data; // @[FIFOFixer.scala:50:9]
wire coupler_from_bus_named_fbus_auto_widget_anon_out_d_bits_corrupt = fixer_auto_anon_in_0_d_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire fixer_x1_anonOut_a_ready = fixer_auto_anon_out_1_a_ready; // @[FIFOFixer.scala:50:9]
wire fixer_x1_anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] fixer_x1_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] fixer_x1_anonOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] fixer_x1_anonOut_a_bits_size; // @[MixedNode.scala:542:17]
wire fixer_x1_anonOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] fixer_x1_anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] fixer_x1_anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] fixer_x1_anonOut_a_bits_data; // @[MixedNode.scala:542:17]
wire fixer_x1_anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire fixer_x1_anonOut_d_ready; // @[MixedNode.scala:542:17]
wire fixer_x1_anonOut_d_valid = fixer_auto_anon_out_1_d_valid; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_x1_anonOut_d_bits_opcode = fixer_auto_anon_out_1_d_bits_opcode; // @[FIFOFixer.scala:50:9]
wire [1:0] fixer_x1_anonOut_d_bits_param = fixer_auto_anon_out_1_d_bits_param; // @[FIFOFixer.scala:50:9]
wire [3:0] fixer_x1_anonOut_d_bits_size = fixer_auto_anon_out_1_d_bits_size; // @[FIFOFixer.scala:50:9]
wire fixer_x1_anonOut_d_bits_source = fixer_auto_anon_out_1_d_bits_source; // @[FIFOFixer.scala:50:9]
wire fixer_x1_anonOut_d_bits_sink = fixer_auto_anon_out_1_d_bits_sink; // @[FIFOFixer.scala:50:9]
wire fixer_x1_anonOut_d_bits_denied = fixer_auto_anon_out_1_d_bits_denied; // @[FIFOFixer.scala:50:9]
wire [63:0] fixer_x1_anonOut_d_bits_data = fixer_auto_anon_out_1_d_bits_data; // @[FIFOFixer.scala:50:9]
wire fixer_x1_anonOut_d_bits_corrupt = fixer_auto_anon_out_1_d_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire fixer_anonOut_a_ready = fixer_auto_anon_out_0_a_ready; // @[FIFOFixer.scala:50:9]
wire fixer_anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] fixer_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] fixer_anonOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] fixer_anonOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [4:0] fixer_anonOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] fixer_anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] fixer_anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] fixer_anonOut_a_bits_data; // @[MixedNode.scala:542:17]
wire fixer_anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire fixer_anonOut_d_ready; // @[MixedNode.scala:542:17]
wire fixer_anonOut_d_valid = fixer_auto_anon_out_0_d_valid; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_anonOut_d_bits_opcode = fixer_auto_anon_out_0_d_bits_opcode; // @[FIFOFixer.scala:50:9]
wire [1:0] fixer_anonOut_d_bits_param = fixer_auto_anon_out_0_d_bits_param; // @[FIFOFixer.scala:50:9]
wire [3:0] fixer_anonOut_d_bits_size = fixer_auto_anon_out_0_d_bits_size; // @[FIFOFixer.scala:50:9]
wire [4:0] fixer_anonOut_d_bits_source = fixer_auto_anon_out_0_d_bits_source; // @[FIFOFixer.scala:50:9]
wire fixer_anonOut_d_bits_sink = fixer_auto_anon_out_0_d_bits_sink; // @[FIFOFixer.scala:50:9]
wire fixer_anonOut_d_bits_denied = fixer_auto_anon_out_0_d_bits_denied; // @[FIFOFixer.scala:50:9]
wire [63:0] fixer_anonOut_d_bits_data = fixer_auto_anon_out_0_d_bits_data; // @[FIFOFixer.scala:50:9]
wire fixer_anonOut_d_bits_corrupt = fixer_auto_anon_out_0_d_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_auto_anon_out_1_a_bits_opcode; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_auto_anon_out_1_a_bits_param; // @[FIFOFixer.scala:50:9]
wire [3:0] fixer_auto_anon_out_1_a_bits_size; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_1_a_bits_source; // @[FIFOFixer.scala:50:9]
wire [31:0] fixer_auto_anon_out_1_a_bits_address; // @[FIFOFixer.scala:50:9]
wire [7:0] fixer_auto_anon_out_1_a_bits_mask; // @[FIFOFixer.scala:50:9]
wire [63:0] fixer_auto_anon_out_1_a_bits_data; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_1_a_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_1_a_valid; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_1_d_ready; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_auto_anon_out_0_a_bits_opcode; // @[FIFOFixer.scala:50:9]
wire [2:0] fixer_auto_anon_out_0_a_bits_param; // @[FIFOFixer.scala:50:9]
wire [3:0] fixer_auto_anon_out_0_a_bits_size; // @[FIFOFixer.scala:50:9]
wire [4:0] fixer_auto_anon_out_0_a_bits_source; // @[FIFOFixer.scala:50:9]
wire [31:0] fixer_auto_anon_out_0_a_bits_address; // @[FIFOFixer.scala:50:9]
wire [7:0] fixer_auto_anon_out_0_a_bits_mask; // @[FIFOFixer.scala:50:9]
wire [63:0] fixer_auto_anon_out_0_a_bits_data; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_0_a_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_0_a_valid; // @[FIFOFixer.scala:50:9]
wire fixer_auto_anon_out_0_d_ready; // @[FIFOFixer.scala:50:9]
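  // TLFIFOFixer output side: anonOut and x1_anonOut drive auto_anon_out_0 and
  // auto_anon_out_1 respectively, with D-channel fields forwarded back to the
  // matching anonIn bundles.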
wire fixer__anonOut_a_valid_T_2; // @[FIFOFixer.scala:95:33]
wire fixer__anonIn_a_ready_T_2 = fixer_anonOut_a_ready; // @[FIFOFixer.scala:96:33]
assign fixer_auto_anon_out_0_a_valid = fixer_anonOut_a_valid; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_0_a_bits_opcode = fixer_anonOut_a_bits_opcode; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_0_a_bits_param = fixer_anonOut_a_bits_param; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_0_a_bits_size = fixer_anonOut_a_bits_size; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_0_a_bits_source = fixer_anonOut_a_bits_source; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_0_a_bits_address = fixer_anonOut_a_bits_address; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_0_a_bits_mask = fixer_anonOut_a_bits_mask; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_0_a_bits_data = fixer_anonOut_a_bits_data; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_0_a_bits_corrupt = fixer_anonOut_a_bits_corrupt; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_0_d_ready = fixer_anonOut_d_ready; // @[FIFOFixer.scala:50:9]
assign fixer_anonIn_d_valid = fixer_anonOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonIn_d_bits_opcode = fixer_anonOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonIn_d_bits_param = fixer_anonOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonIn_d_bits_size = fixer_anonOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonIn_d_bits_source = fixer_anonOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonIn_d_bits_sink = fixer_anonOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonIn_d_bits_denied = fixer_anonOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonIn_d_bits_data = fixer_anonOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonIn_d_bits_corrupt = fixer_anonOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
wire fixer__anonOut_a_valid_T_5; // @[FIFOFixer.scala:95:33]
wire fixer__anonIn_a_ready_T_5 = fixer_x1_anonOut_a_ready; // @[FIFOFixer.scala:96:33]
assign fixer_auto_anon_out_1_a_valid = fixer_x1_anonOut_a_valid; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_1_a_bits_opcode = fixer_x1_anonOut_a_bits_opcode; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_1_a_bits_param = fixer_x1_anonOut_a_bits_param; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_1_a_bits_size = fixer_x1_anonOut_a_bits_size; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_1_a_bits_source = fixer_x1_anonOut_a_bits_source; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_1_a_bits_address = fixer_x1_anonOut_a_bits_address; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_1_a_bits_mask = fixer_x1_anonOut_a_bits_mask; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_1_a_bits_data = fixer_x1_anonOut_a_bits_data; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_1_a_bits_corrupt = fixer_x1_anonOut_a_bits_corrupt; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_out_1_d_ready = fixer_x1_anonOut_d_ready; // @[FIFOFixer.scala:50:9]
assign fixer_x1_anonIn_d_valid = fixer_x1_anonOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign fixer_x1_anonIn_d_bits_opcode = fixer_x1_anonOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign fixer_x1_anonIn_d_bits_param = fixer_x1_anonOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign fixer_x1_anonIn_d_bits_size = fixer_x1_anonOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign fixer_x1_anonIn_d_bits_source = fixer_x1_anonOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign fixer_x1_anonIn_d_bits_sink = fixer_x1_anonOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign fixer_x1_anonIn_d_bits_denied = fixer_x1_anonOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign fixer_x1_anonIn_d_bits_data = fixer_x1_anonOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign fixer_x1_anonIn_d_bits_corrupt = fixer_x1_anonOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign fixer_auto_anon_in_0_a_ready = fixer_anonIn_a_ready; // @[FIFOFixer.scala:50:9]
assign fixer__anonOut_a_valid_T_2 = fixer_anonIn_a_valid; // @[FIFOFixer.scala:95:33]
assign fixer_anonOut_a_bits_opcode = fixer_anonIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonOut_a_bits_param = fixer_anonIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonOut_a_bits_size = fixer_anonIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonOut_a_bits_source = fixer_anonIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonOut_a_bits_address = fixer_anonIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
wire [31:0] fixer__a_notFIFO_T = fixer_anonIn_a_bits_address; // @[Parameters.scala:137:31]
wire [31:0] fixer__a_id_T = fixer_anonIn_a_bits_address; // @[Parameters.scala:137:31]
assign fixer_anonOut_a_bits_mask = fixer_anonIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonOut_a_bits_data = fixer_anonIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonOut_a_bits_corrupt = fixer_anonIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign fixer_anonOut_d_ready = fixer_anonIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign fixer_auto_anon_in_0_d_valid = fixer_anonIn_d_valid; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_0_d_bits_opcode = fixer_anonIn_d_bits_opcode; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_0_d_bits_param = fixer_anonIn_d_bits_param; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_0_d_bits_size = fixer_anonIn_d_bits_size; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_0_d_bits_source = fixer_anonIn_d_bits_source; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_0_d_bits_sink = fixer_anonIn_d_bits_sink; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_0_d_bits_denied = fixer_anonIn_d_bits_denied; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_0_d_bits_data = fixer_anonIn_d_bits_data; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_0_d_bits_corrupt = fixer_anonIn_d_bits_corrupt; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_1_a_ready = fixer_x1_anonIn_a_ready; // @[FIFOFixer.scala:50:9]
assign fixer__anonOut_a_valid_T_5 = fixer_x1_anonIn_a_valid; // @[FIFOFixer.scala:95:33]
assign fixer_x1_anonOut_a_bits_opcode = fixer_x1_anonIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign fixer_x1_anonOut_a_bits_param = fixer_x1_anonIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign fixer_x1_anonOut_a_bits_size = fixer_x1_anonIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign fixer_x1_anonOut_a_bits_source = fixer_x1_anonIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign fixer_x1_anonOut_a_bits_address = fixer_x1_anonIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
wire [31:0] fixer__a_notFIFO_T_5 = fixer_x1_anonIn_a_bits_address; // @[Parameters.scala:137:31]
wire [31:0] fixer__a_id_T_5 = fixer_x1_anonIn_a_bits_address; // @[Parameters.scala:137:31]
assign fixer_x1_anonOut_a_bits_mask = fixer_x1_anonIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign fixer_x1_anonOut_a_bits_data = fixer_x1_anonIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign fixer_x1_anonOut_a_bits_corrupt = fixer_x1_anonIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign fixer_x1_anonOut_d_ready = fixer_x1_anonIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign fixer_auto_anon_in_1_d_valid = fixer_x1_anonIn_d_valid; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_1_d_bits_opcode = fixer_x1_anonIn_d_bits_opcode; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_1_d_bits_param = fixer_x1_anonIn_d_bits_param; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_1_d_bits_size = fixer_x1_anonIn_d_bits_size; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_1_d_bits_source = fixer_x1_anonIn_d_bits_source; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_1_d_bits_sink = fixer_x1_anonIn_d_bits_sink; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_1_d_bits_denied = fixer_x1_anonIn_d_bits_denied; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_1_d_bits_data = fixer_x1_anonIn_d_bits_data; // @[FIFOFixer.scala:50:9]
assign fixer_auto_anon_in_1_d_bits_corrupt = fixer_x1_anonIn_d_bits_corrupt; // @[FIFOFixer.scala:50:9]
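  // First/last beat tracking for anon_in_0: the nets below are the flattened
  // TileLink edge firstlast() bookkeeping. The 9-bit counters cover bursts of
  // up to 512 beats (4 KiB at 8-byte beats). As a rough Chisel sketch, with
  // hypothetical helper names, not the exact generator source:
  //   val beats1  = Mux(hasData(a.opcode), decodeSize(a.size), 0.U) // beats-1
  //   val counter = RegInit(0.U(9.W))
  //   val first   = counter === 0.U
  //   when (fire) { counter := Mux(first, beats1, counter - 1.U) }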
wire [32:0] fixer__a_notFIFO_T_1 = {1'h0, fixer__a_notFIFO_T}; // @[Parameters.scala:137:{31,41}]
wire [32:0] fixer__a_id_T_1 = {1'h0, fixer__a_id_T}; // @[Parameters.scala:137:{31,41}]
wire fixer__a_first_T = fixer_anonIn_a_ready & fixer_anonIn_a_valid; // @[Decoupled.scala:51:35]
wire [26:0] fixer__a_first_beats1_decode_T = 27'hFFF << fixer_anonIn_a_bits_size; // @[package.scala:243:71]
wire [11:0] fixer__a_first_beats1_decode_T_1 = fixer__a_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] fixer__a_first_beats1_decode_T_2 = ~fixer__a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] fixer_a_first_beats1_decode = fixer__a_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire fixer__a_first_beats1_opdata_T = fixer_anonIn_a_bits_opcode[2]; // @[Edges.scala:92:37]
wire fixer_a_first_beats1_opdata = ~fixer__a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [8:0] fixer_a_first_beats1 = fixer_a_first_beats1_opdata ? fixer_a_first_beats1_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] fixer_a_first_counter; // @[Edges.scala:229:27]
wire [9:0] fixer__a_first_counter1_T = {1'h0, fixer_a_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] fixer_a_first_counter1 = fixer__a_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire fixer_a_first = fixer_a_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire fixer__a_first_last_T = fixer_a_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire fixer__a_first_last_T_1 = fixer_a_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire fixer_a_first_last = fixer__a_first_last_T | fixer__a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire fixer_a_first_done = fixer_a_first_last & fixer__a_first_T; // @[Decoupled.scala:51:35]
wire [8:0] fixer__a_first_count_T = ~fixer_a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] fixer_a_first_count = fixer_a_first_beats1 & fixer__a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] fixer__a_first_counter_T = fixer_a_first ? fixer_a_first_beats1 : fixer_a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire fixer__d_first_T = fixer_anonOut_d_ready & fixer_anonOut_d_valid; // @[Decoupled.scala:51:35]
wire [26:0] fixer__d_first_beats1_decode_T = 27'hFFF << fixer_anonOut_d_bits_size; // @[package.scala:243:71]
wire [11:0] fixer__d_first_beats1_decode_T_1 = fixer__d_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] fixer__d_first_beats1_decode_T_2 = ~fixer__d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] fixer_d_first_beats1_decode = fixer__d_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire fixer_d_first_beats1_opdata = fixer_anonOut_d_bits_opcode[0]; // @[Edges.scala:106:36]
wire [8:0] fixer_d_first_beats1 = fixer_d_first_beats1_opdata ? fixer_d_first_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] fixer_d_first_counter; // @[Edges.scala:229:27]
wire [9:0] fixer__d_first_counter1_T = {1'h0, fixer_d_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] fixer_d_first_counter1 = fixer__d_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire fixer_d_first_first = fixer_d_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire fixer__d_first_last_T = fixer_d_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire fixer__d_first_last_T_1 = fixer_d_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire fixer_d_first_last = fixer__d_first_last_T | fixer__d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire fixer_d_first_done = fixer_d_first_last & fixer__d_first_T; // @[Decoupled.scala:51:35]
wire [8:0] fixer__d_first_count_T = ~fixer_d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] fixer_d_first_count = fixer_d_first_beats1 & fixer__d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] fixer__d_first_counter_T = fixer_d_first_first ? fixer_d_first_beats1 : fixer_d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire fixer__d_first_T_1 = fixer_anonOut_d_bits_opcode != 3'h6; // @[FIFOFixer.scala:75:63]
wire fixer_d_first = fixer_d_first_first & fixer__d_first_T_1; // @[FIFOFixer.scala:75:{42,63}]
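  // Per-source in-flight tracking for anon_in_0: one flight bit per source ID
  // (0..16) records whether that source currently has an outstanding
  // FIFO-ordered request; d_first marks the first beat of a non-ReleaseAck
  // (opcode != 6) response.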
reg fixer_flight_0; // @[FIFOFixer.scala:79:27]
reg fixer_flight_1; // @[FIFOFixer.scala:79:27]
reg fixer_flight_2; // @[FIFOFixer.scala:79:27]
reg fixer_flight_3; // @[FIFOFixer.scala:79:27]
reg fixer_flight_4; // @[FIFOFixer.scala:79:27]
reg fixer_flight_5; // @[FIFOFixer.scala:79:27]
reg fixer_flight_6; // @[FIFOFixer.scala:79:27]
reg fixer_flight_7; // @[FIFOFixer.scala:79:27]
reg fixer_flight_8; // @[FIFOFixer.scala:79:27]
reg fixer_flight_9; // @[FIFOFixer.scala:79:27]
reg fixer_flight_10; // @[FIFOFixer.scala:79:27]
reg fixer_flight_11; // @[FIFOFixer.scala:79:27]
reg fixer_flight_12; // @[FIFOFixer.scala:79:27]
reg fixer_flight_13; // @[FIFOFixer.scala:79:27]
reg fixer_flight_14; // @[FIFOFixer.scala:79:27]
reg fixer_flight_15; // @[FIFOFixer.scala:79:27]
reg fixer_flight_16; // @[FIFOFixer.scala:79:27]
wire fixer__T_2 = fixer_anonIn_d_ready & fixer_anonIn_d_valid; // @[Decoupled.scala:51:35]
assign fixer_anonOut_a_valid = fixer__anonOut_a_valid_T_2; // @[FIFOFixer.scala:95:33]
assign fixer_anonIn_a_ready = fixer__anonIn_a_ready_T_2; // @[FIFOFixer.scala:96:33]
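  // Source-ID FIFO bookkeeping: SourceIdSet/SourceIdClear are one-hot decodes
  // of the A- and D-channel source fields on first beats; SourceIdFIFOed
  // accumulates the set, and allIDs_FIFOed is its AND-reduction (all 17 IDs
  // observed).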
reg [16:0] fixer_SourceIdFIFOed; // @[FIFOFixer.scala:115:35]
wire [16:0] fixer_SourceIdSet; // @[FIFOFixer.scala:116:36]
wire [16:0] fixer_SourceIdClear; // @[FIFOFixer.scala:117:38]
wire [31:0] fixer__SourceIdSet_T = 32'h1 << fixer_anonIn_a_bits_source; // @[OneHot.scala:58:35]
assign fixer_SourceIdSet = fixer_a_first & fixer__a_first_T ? fixer__SourceIdSet_T[16:0] : 17'h0; // @[OneHot.scala:58:35]
wire [31:0] fixer__SourceIdClear_T = 32'h1 << fixer_anonIn_d_bits_source; // @[OneHot.scala:58:35]
assign fixer_SourceIdClear = fixer_d_first & fixer__T_2 ? fixer__SourceIdClear_T[16:0] : 17'h0; // @[OneHot.scala:58:35]
wire [16:0] fixer__SourceIdFIFOed_T = fixer_SourceIdFIFOed | fixer_SourceIdSet; // @[FIFOFixer.scala:115:35, :116:36, :126:40]
wire fixer_allIDs_FIFOed = &fixer_SourceIdFIFOed; // @[FIFOFixer.scala:115:35, :127:41]
wire [32:0] fixer__a_notFIFO_T_6 = {1'h0, fixer__a_notFIFO_T_5}; // @[Parameters.scala:137:{31,41}]
wire [32:0] fixer__a_id_T_6 = {1'h0, fixer__a_id_T_5}; // @[Parameters.scala:137:{31,41}]
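  // Same beat-tracking bookkeeping, replicated for the rockettile-facing port
  // (anon_in_1 / x1): the _1-suffixed nets mirror the counters above.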
wire fixer__a_first_T_1 = fixer_x1_anonIn_a_ready & fixer_x1_anonIn_a_valid; // @[Decoupled.scala:51:35]
wire [26:0] fixer__a_first_beats1_decode_T_3 = 27'hFFF << fixer_x1_anonIn_a_bits_size; // @[package.scala:243:71]
wire [11:0] fixer__a_first_beats1_decode_T_4 = fixer__a_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] fixer__a_first_beats1_decode_T_5 = ~fixer__a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] fixer_a_first_beats1_decode_1 = fixer__a_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire fixer__a_first_beats1_opdata_T_1 = fixer_x1_anonIn_a_bits_opcode[2]; // @[Edges.scala:92:37]
wire fixer_a_first_beats1_opdata_1 = ~fixer__a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [8:0] fixer_a_first_beats1_1 = fixer_a_first_beats1_opdata_1 ? fixer_a_first_beats1_decode_1 : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] fixer_a_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] fixer__a_first_counter1_T_1 = {1'h0, fixer_a_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] fixer_a_first_counter1_1 = fixer__a_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire fixer_a_first_1 = fixer_a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire fixer__a_first_last_T_2 = fixer_a_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire fixer__a_first_last_T_3 = fixer_a_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire fixer_a_first_last_1 = fixer__a_first_last_T_2 | fixer__a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire fixer_a_first_done_1 = fixer_a_first_last_1 & fixer__a_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] fixer__a_first_count_T_1 = ~fixer_a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] fixer_a_first_count_1 = fixer_a_first_beats1_1 & fixer__a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] fixer__a_first_counter_T_1 = fixer_a_first_1 ? fixer_a_first_beats1_1 : fixer_a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire fixer__d_first_T_2 = fixer_x1_anonOut_d_ready & fixer_x1_anonOut_d_valid; // @[Decoupled.scala:51:35]
wire [26:0] fixer__d_first_beats1_decode_T_3 = 27'hFFF << fixer_x1_anonOut_d_bits_size; // @[package.scala:243:71]
wire [11:0] fixer__d_first_beats1_decode_T_4 = fixer__d_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] fixer__d_first_beats1_decode_T_5 = ~fixer__d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] fixer_d_first_beats1_decode_1 = fixer__d_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire fixer_d_first_beats1_opdata_1 = fixer_x1_anonOut_d_bits_opcode[0]; // @[Edges.scala:106:36]
wire [8:0] fixer_d_first_beats1_1 = fixer_d_first_beats1_opdata_1 ? fixer_d_first_beats1_decode_1 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] fixer_d_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] fixer__d_first_counter1_T_1 = {1'h0, fixer_d_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] fixer_d_first_counter1_1 = fixer__d_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire fixer_d_first_first_1 = fixer_d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire fixer__d_first_last_T_2 = fixer_d_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire fixer__d_first_last_T_3 = fixer_d_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire fixer_d_first_last_1 = fixer__d_first_last_T_2 | fixer__d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire fixer_d_first_done_1 = fixer_d_first_last_1 & fixer__d_first_T_2; // @[Decoupled.scala:51:35]
wire [8:0] fixer__d_first_count_T_1 = ~fixer_d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] fixer_d_first_count_1 = fixer_d_first_beats1_1 & fixer__d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] fixer__d_first_counter_T_1 = fixer_d_first_first_1 ? fixer_d_first_beats1_1 : fixer_d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire fixer__d_first_T_3 = fixer_x1_anonOut_d_bits_opcode != 3'h6; // @[FIFOFixer.scala:75:63]
wire fixer_d_first_1 = fixer_d_first_first_1 & fixer__d_first_T_3; // @[FIFOFixer.scala:75:{42,63}]
reg fixer_flight_1_0; // @[FIFOFixer.scala:79:27]
reg fixer_flight_1_1; // @[FIFOFixer.scala:79:27]
wire fixer__T_32 = fixer_x1_anonIn_d_ready & fixer_x1_anonIn_d_valid; // @[Decoupled.scala:51:35]
assign fixer_x1_anonOut_a_valid = fixer__anonOut_a_valid_T_5; // @[FIFOFixer.scala:95:33]
assign fixer_x1_anonIn_a_ready = fixer__anonIn_a_ready_T_5; // @[FIFOFixer.scala:96:33]
reg [1:0] fixer_SourceIdFIFOed_1; // @[FIFOFixer.scala:115:35]
wire [1:0] fixer_SourceIdSet_1; // @[FIFOFixer.scala:116:36]
wire [1:0] fixer_SourceIdClear_1; // @[FIFOFixer.scala:117:38]
wire [1:0] fixer__SourceIdSet_T_1 = 2'h1 << fixer_x1_anonIn_a_bits_source; // @[OneHot.scala:58:35]
assign fixer_SourceIdSet_1 = fixer_a_first_1 & fixer__a_first_T_1 ? fixer__SourceIdSet_T_1 : 2'h0; // @[OneHot.scala:58:35]
wire [1:0] fixer__SourceIdClear_T_1 = 2'h1 << fixer_x1_anonIn_d_bits_source; // @[OneHot.scala:58:35]
assign fixer_SourceIdClear_1 = fixer_d_first_1 & fixer__T_32 ? fixer__SourceIdClear_T_1 : 2'h0; // @[OneHot.scala:58:35]
wire [1:0] fixer__SourceIdFIFOed_T_1 = fixer_SourceIdFIFOed_1 | fixer_SourceIdSet_1; // @[FIFOFixer.scala:115:35, :116:36, :126:40]
wire fixer_allIDs_FIFOed_1 = &fixer_SourceIdFIFOed_1; // @[FIFOFixer.scala:115:35, :127:41]
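  // coupler_to_bus_named_cbus: outward port coupler toward the bus named "cbus". The
  // wires below thread the TileLink A and D channels through a width widget
  // (WidthWidget.scala:27) and a bus-crossing node (MixedNode.scala:542/:551). Both
  // sides of the widget are 64 bits wide here, so every assignment in this block is a
  // direct pass-through of the corresponding channel field.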
wire coupler_to_bus_named_cbus_widget_auto_anon_in_a_ready; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_auto_anon_in_a_valid = coupler_to_bus_named_cbus_auto_widget_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_cbus_widget_auto_anon_in_a_bits_opcode = coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_cbus_widget_auto_anon_in_a_bits_param = coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] coupler_to_bus_named_cbus_widget_auto_anon_in_a_bits_size = coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_size; // @[WidthWidget.scala:27:9]
wire [5:0] coupler_to_bus_named_cbus_widget_auto_anon_in_a_bits_source = coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] coupler_to_bus_named_cbus_widget_auto_anon_in_a_bits_address = coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire [7:0] coupler_to_bus_named_cbus_widget_auto_anon_in_a_bits_mask = coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [63:0] coupler_to_bus_named_cbus_widget_auto_anon_in_a_bits_data = coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_data; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_auto_anon_in_a_bits_corrupt = coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_auto_anon_in_d_ready = coupler_to_bus_named_cbus_auto_widget_anon_in_d_ready; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_auto_anon_in_d_valid; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_size; // @[WidthWidget.scala:27:9]
wire [5:0] coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_source; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_sink; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [63:0] coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_data; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_bus_xingOut_a_ready = coupler_to_bus_named_cbus_auto_bus_xing_out_a_ready; // @[MixedNode.scala:542:17]
wire coupler_to_bus_named_cbus_bus_xingOut_a_valid; // @[MixedNode.scala:542:17]
assign auto_coupler_to_bus_named_cbus_bus_xing_out_a_valid_0 = coupler_to_bus_named_cbus_auto_bus_xing_out_a_valid; // @[ClockDomain.scala:14:9]
wire [2:0] coupler_to_bus_named_cbus_bus_xingOut_a_bits_opcode; // @[MixedNode.scala:542:17]
assign auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_opcode_0 = coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_opcode; // @[ClockDomain.scala:14:9]
wire [2:0] coupler_to_bus_named_cbus_bus_xingOut_a_bits_param; // @[MixedNode.scala:542:17]
assign auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_param_0 = coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_param; // @[ClockDomain.scala:14:9]
wire [3:0] coupler_to_bus_named_cbus_bus_xingOut_a_bits_size; // @[MixedNode.scala:542:17]
assign auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_size_0 = coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_size; // @[ClockDomain.scala:14:9]
wire [5:0] coupler_to_bus_named_cbus_bus_xingOut_a_bits_source; // @[MixedNode.scala:542:17]
assign auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_source_0 = coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_source; // @[ClockDomain.scala:14:9]
wire [31:0] coupler_to_bus_named_cbus_bus_xingOut_a_bits_address; // @[MixedNode.scala:542:17]
assign auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_address_0 = coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_address; // @[ClockDomain.scala:14:9]
wire [7:0] coupler_to_bus_named_cbus_bus_xingOut_a_bits_mask; // @[MixedNode.scala:542:17]
assign auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_mask_0 = coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_mask; // @[ClockDomain.scala:14:9]
wire [63:0] coupler_to_bus_named_cbus_bus_xingOut_a_bits_data; // @[MixedNode.scala:542:17]
assign auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_data_0 = coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_data; // @[ClockDomain.scala:14:9]
wire coupler_to_bus_named_cbus_bus_xingOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
assign auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_corrupt_0 = coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_corrupt; // @[ClockDomain.scala:14:9]
wire coupler_to_bus_named_cbus_bus_xingOut_d_ready; // @[MixedNode.scala:542:17]
assign auto_coupler_to_bus_named_cbus_bus_xing_out_d_ready_0 = coupler_to_bus_named_cbus_auto_bus_xing_out_d_ready; // @[ClockDomain.scala:14:9]
wire coupler_to_bus_named_cbus_bus_xingOut_d_valid = coupler_to_bus_named_cbus_auto_bus_xing_out_d_valid; // @[MixedNode.scala:542:17]
wire [2:0] coupler_to_bus_named_cbus_bus_xingOut_d_bits_opcode = coupler_to_bus_named_cbus_auto_bus_xing_out_d_bits_opcode; // @[MixedNode.scala:542:17]
wire [1:0] coupler_to_bus_named_cbus_bus_xingOut_d_bits_param = coupler_to_bus_named_cbus_auto_bus_xing_out_d_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] coupler_to_bus_named_cbus_bus_xingOut_d_bits_size = coupler_to_bus_named_cbus_auto_bus_xing_out_d_bits_size; // @[MixedNode.scala:542:17]
wire [5:0] coupler_to_bus_named_cbus_bus_xingOut_d_bits_source = coupler_to_bus_named_cbus_auto_bus_xing_out_d_bits_source; // @[MixedNode.scala:542:17]
wire coupler_to_bus_named_cbus_bus_xingOut_d_bits_sink = coupler_to_bus_named_cbus_auto_bus_xing_out_d_bits_sink; // @[MixedNode.scala:542:17]
wire coupler_to_bus_named_cbus_bus_xingOut_d_bits_denied = coupler_to_bus_named_cbus_auto_bus_xing_out_d_bits_denied; // @[MixedNode.scala:542:17]
wire [63:0] coupler_to_bus_named_cbus_bus_xingOut_d_bits_data = coupler_to_bus_named_cbus_auto_bus_xing_out_d_bits_data; // @[MixedNode.scala:542:17]
wire coupler_to_bus_named_cbus_bus_xingOut_d_bits_corrupt = coupler_to_bus_named_cbus_auto_bus_xing_out_d_bits_corrupt; // @[MixedNode.scala:542:17]
wire coupler_to_bus_named_cbus_auto_widget_anon_in_a_ready; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_opcode; // @[LazyModuleImp.scala:138:7]
wire [1:0] coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_param; // @[LazyModuleImp.scala:138:7]
wire [3:0] coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_size; // @[LazyModuleImp.scala:138:7]
wire [5:0] coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_source; // @[LazyModuleImp.scala:138:7]
wire coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_sink; // @[LazyModuleImp.scala:138:7]
wire coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_denied; // @[LazyModuleImp.scala:138:7]
wire [63:0] coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_data; // @[LazyModuleImp.scala:138:7]
wire coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_corrupt; // @[LazyModuleImp.scala:138:7]
wire coupler_to_bus_named_cbus_auto_widget_anon_in_d_valid; // @[LazyModuleImp.scala:138:7]
wire coupler_to_bus_named_cbus_widget_anonIn_a_ready; // @[MixedNode.scala:551:17]
assign coupler_to_bus_named_cbus_auto_widget_anon_in_a_ready = coupler_to_bus_named_cbus_widget_auto_anon_in_a_ready; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_anonIn_a_valid = coupler_to_bus_named_cbus_widget_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_cbus_widget_anonIn_a_bits_opcode = coupler_to_bus_named_cbus_widget_auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_cbus_widget_anonIn_a_bits_param = coupler_to_bus_named_cbus_widget_auto_anon_in_a_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] coupler_to_bus_named_cbus_widget_anonIn_a_bits_size = coupler_to_bus_named_cbus_widget_auto_anon_in_a_bits_size; // @[WidthWidget.scala:27:9]
wire [5:0] coupler_to_bus_named_cbus_widget_anonIn_a_bits_source = coupler_to_bus_named_cbus_widget_auto_anon_in_a_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] coupler_to_bus_named_cbus_widget_anonIn_a_bits_address = coupler_to_bus_named_cbus_widget_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire [7:0] coupler_to_bus_named_cbus_widget_anonIn_a_bits_mask = coupler_to_bus_named_cbus_widget_auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [63:0] coupler_to_bus_named_cbus_widget_anonIn_a_bits_data = coupler_to_bus_named_cbus_widget_auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_anonIn_a_bits_corrupt = coupler_to_bus_named_cbus_widget_auto_anon_in_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_anonIn_d_ready = coupler_to_bus_named_cbus_widget_auto_anon_in_d_ready; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_anonIn_d_valid; // @[MixedNode.scala:551:17]
assign coupler_to_bus_named_cbus_auto_widget_anon_in_d_valid = coupler_to_bus_named_cbus_widget_auto_anon_in_d_valid; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_cbus_widget_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
assign coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_opcode = coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] coupler_to_bus_named_cbus_widget_anonIn_d_bits_param; // @[MixedNode.scala:551:17]
assign coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_param = coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] coupler_to_bus_named_cbus_widget_anonIn_d_bits_size; // @[MixedNode.scala:551:17]
assign coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_size = coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_size; // @[WidthWidget.scala:27:9]
wire [5:0] coupler_to_bus_named_cbus_widget_anonIn_d_bits_source; // @[MixedNode.scala:551:17]
assign coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_source = coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_source; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_anonIn_d_bits_sink; // @[MixedNode.scala:551:17]
assign coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_sink = coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_sink; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_anonIn_d_bits_denied; // @[MixedNode.scala:551:17]
assign coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_denied = coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [63:0] coupler_to_bus_named_cbus_widget_anonIn_d_bits_data; // @[MixedNode.scala:551:17]
assign coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_data = coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_data; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
assign coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_corrupt = coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_bus_xingIn_a_ready; // @[MixedNode.scala:551:17]
wire coupler_to_bus_named_cbus_widget_anonOut_a_ready = coupler_to_bus_named_cbus_widget_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] coupler_to_bus_named_cbus_widget_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire coupler_to_bus_named_cbus_bus_xingIn_a_valid = coupler_to_bus_named_cbus_widget_auto_anon_out_a_valid; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_cbus_widget_anonOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] coupler_to_bus_named_cbus_bus_xingIn_a_bits_opcode = coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [3:0] coupler_to_bus_named_cbus_widget_anonOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [2:0] coupler_to_bus_named_cbus_bus_xingIn_a_bits_param = coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_param; // @[WidthWidget.scala:27:9]
wire [5:0] coupler_to_bus_named_cbus_widget_anonOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [3:0] coupler_to_bus_named_cbus_bus_xingIn_a_bits_size = coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_size; // @[WidthWidget.scala:27:9]
wire [31:0] coupler_to_bus_named_cbus_widget_anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [5:0] coupler_to_bus_named_cbus_bus_xingIn_a_bits_source = coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_source; // @[WidthWidget.scala:27:9]
wire [7:0] coupler_to_bus_named_cbus_widget_anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [31:0] coupler_to_bus_named_cbus_bus_xingIn_a_bits_address = coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_address; // @[WidthWidget.scala:27:9]
wire [63:0] coupler_to_bus_named_cbus_widget_anonOut_a_bits_data; // @[MixedNode.scala:542:17]
wire [7:0] coupler_to_bus_named_cbus_bus_xingIn_a_bits_mask = coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_mask; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire [63:0] coupler_to_bus_named_cbus_bus_xingIn_a_bits_data = coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_data; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_widget_anonOut_d_ready; // @[MixedNode.scala:542:17]
wire coupler_to_bus_named_cbus_bus_xingIn_a_bits_corrupt = coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_bus_xingIn_d_ready = coupler_to_bus_named_cbus_widget_auto_anon_out_d_ready; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_bus_xingIn_d_valid; // @[MixedNode.scala:551:17]
wire coupler_to_bus_named_cbus_widget_anonOut_d_valid = coupler_to_bus_named_cbus_widget_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_cbus_bus_xingIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [2:0] coupler_to_bus_named_cbus_widget_anonOut_d_bits_opcode = coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] coupler_to_bus_named_cbus_bus_xingIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [1:0] coupler_to_bus_named_cbus_widget_anonOut_d_bits_param = coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] coupler_to_bus_named_cbus_bus_xingIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [3:0] coupler_to_bus_named_cbus_widget_anonOut_d_bits_size = coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9]
wire [5:0] coupler_to_bus_named_cbus_bus_xingIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [5:0] coupler_to_bus_named_cbus_widget_anonOut_d_bits_source = coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_source; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_bus_xingIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire coupler_to_bus_named_cbus_widget_anonOut_d_bits_sink = coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_bus_xingIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire coupler_to_bus_named_cbus_widget_anonOut_d_bits_denied = coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [63:0] coupler_to_bus_named_cbus_bus_xingIn_d_bits_data; // @[MixedNode.scala:551:17]
wire [63:0] coupler_to_bus_named_cbus_widget_anonOut_d_bits_data = coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_cbus_bus_xingIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire coupler_to_bus_named_cbus_widget_anonOut_d_bits_corrupt = coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_anonIn_a_ready = coupler_to_bus_named_cbus_widget_anonOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_widget_auto_anon_out_a_valid = coupler_to_bus_named_cbus_widget_anonOut_a_valid; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_opcode = coupler_to_bus_named_cbus_widget_anonOut_a_bits_opcode; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_param = coupler_to_bus_named_cbus_widget_anonOut_a_bits_param; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_size = coupler_to_bus_named_cbus_widget_anonOut_a_bits_size; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_source = coupler_to_bus_named_cbus_widget_anonOut_a_bits_source; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_address = coupler_to_bus_named_cbus_widget_anonOut_a_bits_address; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_mask = coupler_to_bus_named_cbus_widget_anonOut_a_bits_mask; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_data = coupler_to_bus_named_cbus_widget_anonOut_a_bits_data; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_out_a_bits_corrupt = coupler_to_bus_named_cbus_widget_anonOut_a_bits_corrupt; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_out_d_ready = coupler_to_bus_named_cbus_widget_anonOut_d_ready; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_anonIn_d_valid = coupler_to_bus_named_cbus_widget_anonOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_widget_anonIn_d_bits_opcode = coupler_to_bus_named_cbus_widget_anonOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_widget_anonIn_d_bits_param = coupler_to_bus_named_cbus_widget_anonOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_widget_anonIn_d_bits_size = coupler_to_bus_named_cbus_widget_anonOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_widget_anonIn_d_bits_source = coupler_to_bus_named_cbus_widget_anonOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_widget_anonIn_d_bits_sink = coupler_to_bus_named_cbus_widget_anonOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_widget_anonIn_d_bits_denied = coupler_to_bus_named_cbus_widget_anonOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_widget_anonIn_d_bits_data = coupler_to_bus_named_cbus_widget_anonOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_widget_anonIn_d_bits_corrupt = coupler_to_bus_named_cbus_widget_anonOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_widget_auto_anon_in_a_ready = coupler_to_bus_named_cbus_widget_anonIn_a_ready; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_anonOut_a_valid = coupler_to_bus_named_cbus_widget_anonIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_widget_anonOut_a_bits_opcode = coupler_to_bus_named_cbus_widget_anonIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_widget_anonOut_a_bits_param = coupler_to_bus_named_cbus_widget_anonIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_widget_anonOut_a_bits_size = coupler_to_bus_named_cbus_widget_anonIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_widget_anonOut_a_bits_source = coupler_to_bus_named_cbus_widget_anonIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_widget_anonOut_a_bits_address = coupler_to_bus_named_cbus_widget_anonIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_widget_anonOut_a_bits_mask = coupler_to_bus_named_cbus_widget_anonIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_widget_anonOut_a_bits_data = coupler_to_bus_named_cbus_widget_anonIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_widget_anonOut_a_bits_corrupt = coupler_to_bus_named_cbus_widget_anonIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_widget_anonOut_d_ready = coupler_to_bus_named_cbus_widget_anonIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_widget_auto_anon_in_d_valid = coupler_to_bus_named_cbus_widget_anonIn_d_valid; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_opcode = coupler_to_bus_named_cbus_widget_anonIn_d_bits_opcode; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_param = coupler_to_bus_named_cbus_widget_anonIn_d_bits_param; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_size = coupler_to_bus_named_cbus_widget_anonIn_d_bits_size; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_source = coupler_to_bus_named_cbus_widget_anonIn_d_bits_source; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_sink = coupler_to_bus_named_cbus_widget_anonIn_d_bits_sink; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_denied = coupler_to_bus_named_cbus_widget_anonIn_d_bits_denied; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_data = coupler_to_bus_named_cbus_widget_anonIn_d_bits_data; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_in_d_bits_corrupt = coupler_to_bus_named_cbus_widget_anonIn_d_bits_corrupt; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_bus_xingIn_a_ready = coupler_to_bus_named_cbus_bus_xingOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_auto_bus_xing_out_a_valid = coupler_to_bus_named_cbus_bus_xingOut_a_valid; // @[MixedNode.scala:542:17]
assign coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_opcode = coupler_to_bus_named_cbus_bus_xingOut_a_bits_opcode; // @[MixedNode.scala:542:17]
assign coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_param = coupler_to_bus_named_cbus_bus_xingOut_a_bits_param; // @[MixedNode.scala:542:17]
assign coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_size = coupler_to_bus_named_cbus_bus_xingOut_a_bits_size; // @[MixedNode.scala:542:17]
assign coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_source = coupler_to_bus_named_cbus_bus_xingOut_a_bits_source; // @[MixedNode.scala:542:17]
assign coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_address = coupler_to_bus_named_cbus_bus_xingOut_a_bits_address; // @[MixedNode.scala:542:17]
assign coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_mask = coupler_to_bus_named_cbus_bus_xingOut_a_bits_mask; // @[MixedNode.scala:542:17]
assign coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_data = coupler_to_bus_named_cbus_bus_xingOut_a_bits_data; // @[MixedNode.scala:542:17]
assign coupler_to_bus_named_cbus_auto_bus_xing_out_a_bits_corrupt = coupler_to_bus_named_cbus_bus_xingOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
assign coupler_to_bus_named_cbus_auto_bus_xing_out_d_ready = coupler_to_bus_named_cbus_bus_xingOut_d_ready; // @[MixedNode.scala:542:17]
assign coupler_to_bus_named_cbus_bus_xingIn_d_valid = coupler_to_bus_named_cbus_bus_xingOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_bus_xingIn_d_bits_opcode = coupler_to_bus_named_cbus_bus_xingOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_bus_xingIn_d_bits_param = coupler_to_bus_named_cbus_bus_xingOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_bus_xingIn_d_bits_size = coupler_to_bus_named_cbus_bus_xingOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_bus_xingIn_d_bits_source = coupler_to_bus_named_cbus_bus_xingOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_bus_xingIn_d_bits_sink = coupler_to_bus_named_cbus_bus_xingOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_bus_xingIn_d_bits_denied = coupler_to_bus_named_cbus_bus_xingOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_bus_xingIn_d_bits_data = coupler_to_bus_named_cbus_bus_xingOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_bus_xingIn_d_bits_corrupt = coupler_to_bus_named_cbus_bus_xingOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_widget_auto_anon_out_a_ready = coupler_to_bus_named_cbus_bus_xingIn_a_ready; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_bus_xingOut_a_valid = coupler_to_bus_named_cbus_bus_xingIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_bus_xingOut_a_bits_opcode = coupler_to_bus_named_cbus_bus_xingIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_bus_xingOut_a_bits_param = coupler_to_bus_named_cbus_bus_xingIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_bus_xingOut_a_bits_size = coupler_to_bus_named_cbus_bus_xingIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_bus_xingOut_a_bits_source = coupler_to_bus_named_cbus_bus_xingIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_bus_xingOut_a_bits_address = coupler_to_bus_named_cbus_bus_xingIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_bus_xingOut_a_bits_mask = coupler_to_bus_named_cbus_bus_xingIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_bus_xingOut_a_bits_data = coupler_to_bus_named_cbus_bus_xingIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_bus_xingOut_a_bits_corrupt = coupler_to_bus_named_cbus_bus_xingIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_bus_xingOut_d_ready = coupler_to_bus_named_cbus_bus_xingIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_cbus_widget_auto_anon_out_d_valid = coupler_to_bus_named_cbus_bus_xingIn_d_valid; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_opcode = coupler_to_bus_named_cbus_bus_xingIn_d_bits_opcode; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_param = coupler_to_bus_named_cbus_bus_xingIn_d_bits_param; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_size = coupler_to_bus_named_cbus_bus_xingIn_d_bits_size; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_source = coupler_to_bus_named_cbus_bus_xingIn_d_bits_source; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_sink = coupler_to_bus_named_cbus_bus_xingIn_d_bits_sink; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_denied = coupler_to_bus_named_cbus_bus_xingIn_d_bits_denied; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_data = coupler_to_bus_named_cbus_bus_xingIn_d_bits_data; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_cbus_widget_auto_anon_out_d_bits_corrupt = coupler_to_bus_named_cbus_bus_xingIn_d_bits_corrupt; // @[WidthWidget.scala:27:9]
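  // coupler_from_bus_named_fbus: inward port coupler from the bus named "fbus". Its
  // widget output drives slave port 0 of the FIFOFixer (fixer_auto_anon_in_0_*), and
  // the remaining wires forward the bus-crossing input through the width widget
  // unchanged, mirroring the pass-through structure of the cbus coupler above.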
wire coupler_from_bus_named_fbus_widget_auto_anon_out_a_ready = coupler_from_bus_named_fbus_auto_widget_anon_out_a_ready; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_widget_auto_anon_out_a_valid; // @[WidthWidget.scala:27:9]
assign fixer_auto_anon_in_0_a_valid = coupler_from_bus_named_fbus_auto_widget_anon_out_a_valid; // @[FIFOFixer.scala:50:9]
wire [2:0] coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_opcode; // @[WidthWidget.scala:27:9]
assign fixer_auto_anon_in_0_a_bits_opcode = coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_opcode; // @[FIFOFixer.scala:50:9]
wire [2:0] coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_param; // @[WidthWidget.scala:27:9]
assign fixer_auto_anon_in_0_a_bits_param = coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_param; // @[FIFOFixer.scala:50:9]
wire [3:0] coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_size; // @[WidthWidget.scala:27:9]
assign fixer_auto_anon_in_0_a_bits_size = coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_size; // @[FIFOFixer.scala:50:9]
wire [4:0] coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_source; // @[WidthWidget.scala:27:9]
assign fixer_auto_anon_in_0_a_bits_source = coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_source; // @[FIFOFixer.scala:50:9]
wire [31:0] coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_address; // @[WidthWidget.scala:27:9]
assign fixer_auto_anon_in_0_a_bits_address = coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_address; // @[FIFOFixer.scala:50:9]
wire [7:0] coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_mask; // @[WidthWidget.scala:27:9]
assign fixer_auto_anon_in_0_a_bits_mask = coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_mask; // @[FIFOFixer.scala:50:9]
wire [63:0] coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_data; // @[WidthWidget.scala:27:9]
assign fixer_auto_anon_in_0_a_bits_data = coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_data; // @[FIFOFixer.scala:50:9]
wire coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_corrupt; // @[WidthWidget.scala:27:9]
assign fixer_auto_anon_in_0_a_bits_corrupt = coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire coupler_from_bus_named_fbus_widget_auto_anon_out_d_ready; // @[WidthWidget.scala:27:9]
assign fixer_auto_anon_in_0_d_ready = coupler_from_bus_named_fbus_auto_widget_anon_out_d_ready; // @[FIFOFixer.scala:50:9]
wire coupler_from_bus_named_fbus_widget_auto_anon_out_d_valid = coupler_from_bus_named_fbus_auto_widget_anon_out_d_valid; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_from_bus_named_fbus_widget_auto_anon_out_d_bits_opcode = coupler_from_bus_named_fbus_auto_widget_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] coupler_from_bus_named_fbus_widget_auto_anon_out_d_bits_param = coupler_from_bus_named_fbus_auto_widget_anon_out_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] coupler_from_bus_named_fbus_widget_auto_anon_out_d_bits_size = coupler_from_bus_named_fbus_auto_widget_anon_out_d_bits_size; // @[WidthWidget.scala:27:9]
wire [4:0] coupler_from_bus_named_fbus_widget_auto_anon_out_d_bits_source = coupler_from_bus_named_fbus_auto_widget_anon_out_d_bits_source; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_widget_auto_anon_out_d_bits_sink = coupler_from_bus_named_fbus_auto_widget_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_widget_auto_anon_out_d_bits_denied = coupler_from_bus_named_fbus_auto_widget_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [63:0] coupler_from_bus_named_fbus_widget_auto_anon_out_d_bits_data = coupler_from_bus_named_fbus_auto_widget_anon_out_d_bits_data; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_widget_auto_anon_out_d_bits_corrupt = coupler_from_bus_named_fbus_auto_widget_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_bus_xingIn_a_ready; // @[MixedNode.scala:551:17]
assign auto_coupler_from_bus_named_fbus_bus_xing_in_a_ready_0 = coupler_from_bus_named_fbus_auto_bus_xing_in_a_ready; // @[ClockDomain.scala:14:9]
wire coupler_from_bus_named_fbus_bus_xingIn_a_valid = coupler_from_bus_named_fbus_auto_bus_xing_in_a_valid; // @[MixedNode.scala:551:17]
wire [2:0] coupler_from_bus_named_fbus_bus_xingIn_a_bits_opcode = coupler_from_bus_named_fbus_auto_bus_xing_in_a_bits_opcode; // @[MixedNode.scala:551:17]
wire [2:0] coupler_from_bus_named_fbus_bus_xingIn_a_bits_param = coupler_from_bus_named_fbus_auto_bus_xing_in_a_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] coupler_from_bus_named_fbus_bus_xingIn_a_bits_size = coupler_from_bus_named_fbus_auto_bus_xing_in_a_bits_size; // @[MixedNode.scala:551:17]
wire [4:0] coupler_from_bus_named_fbus_bus_xingIn_a_bits_source = coupler_from_bus_named_fbus_auto_bus_xing_in_a_bits_source; // @[MixedNode.scala:551:17]
wire [31:0] coupler_from_bus_named_fbus_bus_xingIn_a_bits_address = coupler_from_bus_named_fbus_auto_bus_xing_in_a_bits_address; // @[MixedNode.scala:551:17]
wire [7:0] coupler_from_bus_named_fbus_bus_xingIn_a_bits_mask = coupler_from_bus_named_fbus_auto_bus_xing_in_a_bits_mask; // @[MixedNode.scala:551:17]
wire [63:0] coupler_from_bus_named_fbus_bus_xingIn_a_bits_data = coupler_from_bus_named_fbus_auto_bus_xing_in_a_bits_data; // @[MixedNode.scala:551:17]
wire coupler_from_bus_named_fbus_bus_xingIn_a_bits_corrupt = coupler_from_bus_named_fbus_auto_bus_xing_in_a_bits_corrupt; // @[MixedNode.scala:551:17]
wire coupler_from_bus_named_fbus_bus_xingIn_d_ready = coupler_from_bus_named_fbus_auto_bus_xing_in_d_ready; // @[MixedNode.scala:551:17]
wire coupler_from_bus_named_fbus_bus_xingIn_d_valid; // @[MixedNode.scala:551:17]
assign auto_coupler_from_bus_named_fbus_bus_xing_in_d_valid_0 = coupler_from_bus_named_fbus_auto_bus_xing_in_d_valid; // @[ClockDomain.scala:14:9]
wire [2:0] coupler_from_bus_named_fbus_bus_xingIn_d_bits_opcode; // @[MixedNode.scala:551:17]
assign auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_opcode_0 = coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_opcode; // @[ClockDomain.scala:14:9]
wire [1:0] coupler_from_bus_named_fbus_bus_xingIn_d_bits_param; // @[MixedNode.scala:551:17]
assign auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_param_0 = coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_param; // @[ClockDomain.scala:14:9]
wire [3:0] coupler_from_bus_named_fbus_bus_xingIn_d_bits_size; // @[MixedNode.scala:551:17]
assign auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_size_0 = coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_size; // @[ClockDomain.scala:14:9]
wire [4:0] coupler_from_bus_named_fbus_bus_xingIn_d_bits_source; // @[MixedNode.scala:551:17]
assign auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_source_0 = coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_source; // @[ClockDomain.scala:14:9]
wire coupler_from_bus_named_fbus_bus_xingIn_d_bits_sink; // @[MixedNode.scala:551:17]
assign auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_sink_0 = coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_sink; // @[ClockDomain.scala:14:9]
wire coupler_from_bus_named_fbus_bus_xingIn_d_bits_denied; // @[MixedNode.scala:551:17]
assign auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_denied_0 = coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_denied; // @[ClockDomain.scala:14:9]
wire [63:0] coupler_from_bus_named_fbus_bus_xingIn_d_bits_data; // @[MixedNode.scala:551:17]
assign auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_data_0 = coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_data; // @[ClockDomain.scala:14:9]
wire coupler_from_bus_named_fbus_bus_xingIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
assign auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_corrupt_0 = coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_corrupt; // @[ClockDomain.scala:14:9]
wire coupler_from_bus_named_fbus_widget_anonIn_a_ready; // @[MixedNode.scala:551:17]
wire coupler_from_bus_named_fbus_bus_xingOut_a_ready = coupler_from_bus_named_fbus_widget_auto_anon_in_a_ready; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_bus_xingOut_a_valid; // @[MixedNode.scala:542:17]
wire coupler_from_bus_named_fbus_widget_anonIn_a_valid = coupler_from_bus_named_fbus_widget_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_from_bus_named_fbus_bus_xingOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] coupler_from_bus_named_fbus_widget_anonIn_a_bits_opcode = coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_from_bus_named_fbus_bus_xingOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] coupler_from_bus_named_fbus_widget_anonIn_a_bits_param = coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] coupler_from_bus_named_fbus_bus_xingOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [3:0] coupler_from_bus_named_fbus_widget_anonIn_a_bits_size = coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_size; // @[WidthWidget.scala:27:9]
wire [4:0] coupler_from_bus_named_fbus_bus_xingOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [4:0] coupler_from_bus_named_fbus_widget_anonIn_a_bits_source = coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] coupler_from_bus_named_fbus_bus_xingOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [31:0] coupler_from_bus_named_fbus_widget_anonIn_a_bits_address = coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire [7:0] coupler_from_bus_named_fbus_bus_xingOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [7:0] coupler_from_bus_named_fbus_widget_anonIn_a_bits_mask = coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [63:0] coupler_from_bus_named_fbus_bus_xingOut_a_bits_data; // @[MixedNode.scala:542:17]
wire [63:0] coupler_from_bus_named_fbus_widget_anonIn_a_bits_data = coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_bus_xingOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire coupler_from_bus_named_fbus_widget_anonIn_a_bits_corrupt = coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_bus_xingOut_d_ready; // @[MixedNode.scala:542:17]
wire coupler_from_bus_named_fbus_widget_anonIn_d_ready = coupler_from_bus_named_fbus_widget_auto_anon_in_d_ready; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_widget_anonIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] coupler_from_bus_named_fbus_widget_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire coupler_from_bus_named_fbus_bus_xingOut_d_valid = coupler_from_bus_named_fbus_widget_auto_anon_in_d_valid; // @[WidthWidget.scala:27:9]
wire [1:0] coupler_from_bus_named_fbus_widget_anonIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [2:0] coupler_from_bus_named_fbus_bus_xingOut_d_bits_opcode = coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [3:0] coupler_from_bus_named_fbus_widget_anonIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [1:0] coupler_from_bus_named_fbus_bus_xingOut_d_bits_param = coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_param; // @[WidthWidget.scala:27:9]
wire [4:0] coupler_from_bus_named_fbus_widget_anonIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [3:0] coupler_from_bus_named_fbus_bus_xingOut_d_bits_size = coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_size; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_widget_anonIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire [4:0] coupler_from_bus_named_fbus_bus_xingOut_d_bits_source = coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_source; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_widget_anonIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire coupler_from_bus_named_fbus_bus_xingOut_d_bits_sink = coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_sink; // @[WidthWidget.scala:27:9]
wire [63:0] coupler_from_bus_named_fbus_widget_anonIn_d_bits_data; // @[MixedNode.scala:551:17]
wire coupler_from_bus_named_fbus_bus_xingOut_d_bits_denied = coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_denied; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_widget_anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire [63:0] coupler_from_bus_named_fbus_bus_xingOut_d_bits_data = coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_data; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_bus_xingOut_d_bits_corrupt = coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_widget_anonOut_a_ready = coupler_from_bus_named_fbus_widget_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_widget_anonOut_a_valid; // @[MixedNode.scala:542:17]
assign coupler_from_bus_named_fbus_auto_widget_anon_out_a_valid = coupler_from_bus_named_fbus_widget_auto_anon_out_a_valid; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_from_bus_named_fbus_widget_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
assign coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_opcode = coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_from_bus_named_fbus_widget_anonOut_a_bits_param; // @[MixedNode.scala:542:17]
assign coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_param = coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] coupler_from_bus_named_fbus_widget_anonOut_a_bits_size; // @[MixedNode.scala:542:17]
assign coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_size = coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_size; // @[WidthWidget.scala:27:9]
wire [4:0] coupler_from_bus_named_fbus_widget_anonOut_a_bits_source; // @[MixedNode.scala:542:17]
assign coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_source = coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] coupler_from_bus_named_fbus_widget_anonOut_a_bits_address; // @[MixedNode.scala:542:17]
assign coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_address = coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_address; // @[WidthWidget.scala:27:9]
wire [7:0] coupler_from_bus_named_fbus_widget_anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
assign coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_mask = coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [63:0] coupler_from_bus_named_fbus_widget_anonOut_a_bits_data; // @[MixedNode.scala:542:17]
assign coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_data = coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_data; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_widget_anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
assign coupler_from_bus_named_fbus_auto_widget_anon_out_a_bits_corrupt = coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_widget_anonOut_d_ready; // @[MixedNode.scala:542:17]
assign coupler_from_bus_named_fbus_auto_widget_anon_out_d_ready = coupler_from_bus_named_fbus_widget_auto_anon_out_d_ready; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_widget_anonOut_d_valid = coupler_from_bus_named_fbus_widget_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_from_bus_named_fbus_widget_anonOut_d_bits_opcode = coupler_from_bus_named_fbus_widget_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] coupler_from_bus_named_fbus_widget_anonOut_d_bits_param = coupler_from_bus_named_fbus_widget_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] coupler_from_bus_named_fbus_widget_anonOut_d_bits_size = coupler_from_bus_named_fbus_widget_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9]
wire [4:0] coupler_from_bus_named_fbus_widget_anonOut_d_bits_source = coupler_from_bus_named_fbus_widget_auto_anon_out_d_bits_source; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_widget_anonOut_d_bits_sink = coupler_from_bus_named_fbus_widget_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_widget_anonOut_d_bits_denied = coupler_from_bus_named_fbus_widget_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [63:0] coupler_from_bus_named_fbus_widget_anonOut_d_bits_data = coupler_from_bus_named_fbus_widget_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9]
wire coupler_from_bus_named_fbus_widget_anonOut_d_bits_corrupt = coupler_from_bus_named_fbus_widget_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_anonIn_a_ready = coupler_from_bus_named_fbus_widget_anonOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_widget_auto_anon_out_a_valid = coupler_from_bus_named_fbus_widget_anonOut_a_valid; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_opcode = coupler_from_bus_named_fbus_widget_anonOut_a_bits_opcode; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_param = coupler_from_bus_named_fbus_widget_anonOut_a_bits_param; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_size = coupler_from_bus_named_fbus_widget_anonOut_a_bits_size; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_source = coupler_from_bus_named_fbus_widget_anonOut_a_bits_source; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_address = coupler_from_bus_named_fbus_widget_anonOut_a_bits_address; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_mask = coupler_from_bus_named_fbus_widget_anonOut_a_bits_mask; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_data = coupler_from_bus_named_fbus_widget_anonOut_a_bits_data; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_out_a_bits_corrupt = coupler_from_bus_named_fbus_widget_anonOut_a_bits_corrupt; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_out_d_ready = coupler_from_bus_named_fbus_widget_anonOut_d_ready; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_anonIn_d_valid = coupler_from_bus_named_fbus_widget_anonOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_widget_anonIn_d_bits_opcode = coupler_from_bus_named_fbus_widget_anonOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_widget_anonIn_d_bits_param = coupler_from_bus_named_fbus_widget_anonOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_widget_anonIn_d_bits_size = coupler_from_bus_named_fbus_widget_anonOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_widget_anonIn_d_bits_source = coupler_from_bus_named_fbus_widget_anonOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_widget_anonIn_d_bits_sink = coupler_from_bus_named_fbus_widget_anonOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_widget_anonIn_d_bits_denied = coupler_from_bus_named_fbus_widget_anonOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_widget_anonIn_d_bits_data = coupler_from_bus_named_fbus_widget_anonOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_widget_anonIn_d_bits_corrupt = coupler_from_bus_named_fbus_widget_anonOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_widget_auto_anon_in_a_ready = coupler_from_bus_named_fbus_widget_anonIn_a_ready; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_anonOut_a_valid = coupler_from_bus_named_fbus_widget_anonIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_widget_anonOut_a_bits_opcode = coupler_from_bus_named_fbus_widget_anonIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_widget_anonOut_a_bits_param = coupler_from_bus_named_fbus_widget_anonIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_widget_anonOut_a_bits_size = coupler_from_bus_named_fbus_widget_anonIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_widget_anonOut_a_bits_source = coupler_from_bus_named_fbus_widget_anonIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_widget_anonOut_a_bits_address = coupler_from_bus_named_fbus_widget_anonIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_widget_anonOut_a_bits_mask = coupler_from_bus_named_fbus_widget_anonIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_widget_anonOut_a_bits_data = coupler_from_bus_named_fbus_widget_anonIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_widget_anonOut_a_bits_corrupt = coupler_from_bus_named_fbus_widget_anonIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_widget_anonOut_d_ready = coupler_from_bus_named_fbus_widget_anonIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_widget_auto_anon_in_d_valid = coupler_from_bus_named_fbus_widget_anonIn_d_valid; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_opcode = coupler_from_bus_named_fbus_widget_anonIn_d_bits_opcode; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_param = coupler_from_bus_named_fbus_widget_anonIn_d_bits_param; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_size = coupler_from_bus_named_fbus_widget_anonIn_d_bits_size; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_source = coupler_from_bus_named_fbus_widget_anonIn_d_bits_source; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_sink = coupler_from_bus_named_fbus_widget_anonIn_d_bits_sink; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_denied = coupler_from_bus_named_fbus_widget_anonIn_d_bits_denied; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_data = coupler_from_bus_named_fbus_widget_anonIn_d_bits_data; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_in_d_bits_corrupt = coupler_from_bus_named_fbus_widget_anonIn_d_bits_corrupt; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_bus_xingIn_a_ready = coupler_from_bus_named_fbus_bus_xingOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_widget_auto_anon_in_a_valid = coupler_from_bus_named_fbus_bus_xingOut_a_valid; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_opcode = coupler_from_bus_named_fbus_bus_xingOut_a_bits_opcode; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_param = coupler_from_bus_named_fbus_bus_xingOut_a_bits_param; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_size = coupler_from_bus_named_fbus_bus_xingOut_a_bits_size; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_source = coupler_from_bus_named_fbus_bus_xingOut_a_bits_source; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_address = coupler_from_bus_named_fbus_bus_xingOut_a_bits_address; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_mask = coupler_from_bus_named_fbus_bus_xingOut_a_bits_mask; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_data = coupler_from_bus_named_fbus_bus_xingOut_a_bits_data; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_in_a_bits_corrupt = coupler_from_bus_named_fbus_bus_xingOut_a_bits_corrupt; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_widget_auto_anon_in_d_ready = coupler_from_bus_named_fbus_bus_xingOut_d_ready; // @[WidthWidget.scala:27:9]
assign coupler_from_bus_named_fbus_bus_xingIn_d_valid = coupler_from_bus_named_fbus_bus_xingOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_bus_xingIn_d_bits_opcode = coupler_from_bus_named_fbus_bus_xingOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_bus_xingIn_d_bits_param = coupler_from_bus_named_fbus_bus_xingOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_bus_xingIn_d_bits_size = coupler_from_bus_named_fbus_bus_xingOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_bus_xingIn_d_bits_source = coupler_from_bus_named_fbus_bus_xingOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_bus_xingIn_d_bits_sink = coupler_from_bus_named_fbus_bus_xingOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_bus_xingIn_d_bits_denied = coupler_from_bus_named_fbus_bus_xingOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_bus_xingIn_d_bits_data = coupler_from_bus_named_fbus_bus_xingOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_bus_xingIn_d_bits_corrupt = coupler_from_bus_named_fbus_bus_xingOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_auto_bus_xing_in_a_ready = coupler_from_bus_named_fbus_bus_xingIn_a_ready; // @[MixedNode.scala:551:17]
assign coupler_from_bus_named_fbus_bus_xingOut_a_valid = coupler_from_bus_named_fbus_bus_xingIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_bus_xingOut_a_bits_opcode = coupler_from_bus_named_fbus_bus_xingIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_bus_xingOut_a_bits_param = coupler_from_bus_named_fbus_bus_xingIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_bus_xingOut_a_bits_size = coupler_from_bus_named_fbus_bus_xingIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_bus_xingOut_a_bits_source = coupler_from_bus_named_fbus_bus_xingIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_bus_xingOut_a_bits_address = coupler_from_bus_named_fbus_bus_xingIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_bus_xingOut_a_bits_mask = coupler_from_bus_named_fbus_bus_xingIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_bus_xingOut_a_bits_data = coupler_from_bus_named_fbus_bus_xingIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_bus_xingOut_a_bits_corrupt = coupler_from_bus_named_fbus_bus_xingIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_bus_xingOut_d_ready = coupler_from_bus_named_fbus_bus_xingIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_bus_named_fbus_auto_bus_xing_in_d_valid = coupler_from_bus_named_fbus_bus_xingIn_d_valid; // @[MixedNode.scala:551:17]
assign coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_opcode = coupler_from_bus_named_fbus_bus_xingIn_d_bits_opcode; // @[MixedNode.scala:551:17]
assign coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_param = coupler_from_bus_named_fbus_bus_xingIn_d_bits_param; // @[MixedNode.scala:551:17]
assign coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_size = coupler_from_bus_named_fbus_bus_xingIn_d_bits_size; // @[MixedNode.scala:551:17]
assign coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_source = coupler_from_bus_named_fbus_bus_xingIn_d_bits_source; // @[MixedNode.scala:551:17]
assign coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_sink = coupler_from_bus_named_fbus_bus_xingIn_d_bits_sink; // @[MixedNode.scala:551:17]
assign coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_denied = coupler_from_bus_named_fbus_bus_xingIn_d_bits_denied; // @[MixedNode.scala:551:17]
assign coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_data = coupler_from_bus_named_fbus_bus_xingIn_d_bits_data; // @[MixedNode.scala:551:17]
assign coupler_from_bus_named_fbus_auto_bus_xing_in_d_bits_corrupt = coupler_from_bus_named_fbus_bus_xingIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire coupler_from_rockettile_tlMasterClockXingIn_a_ready; // @[MixedNode.scala:551:17]
assign auto_coupler_from_rockettile_tl_master_clock_xing_in_a_ready_0 = coupler_from_rockettile_auto_tl_master_clock_xing_in_a_ready; // @[ClockDomain.scala:14:9]
wire coupler_from_rockettile_tlMasterClockXingIn_a_valid = coupler_from_rockettile_auto_tl_master_clock_xing_in_a_valid; // @[MixedNode.scala:551:17]
wire [2:0] coupler_from_rockettile_tlMasterClockXingIn_a_bits_opcode = coupler_from_rockettile_auto_tl_master_clock_xing_in_a_bits_opcode; // @[MixedNode.scala:551:17]
wire [2:0] coupler_from_rockettile_tlMasterClockXingIn_a_bits_param = coupler_from_rockettile_auto_tl_master_clock_xing_in_a_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] coupler_from_rockettile_tlMasterClockXingIn_a_bits_size = coupler_from_rockettile_auto_tl_master_clock_xing_in_a_bits_size; // @[MixedNode.scala:551:17]
wire coupler_from_rockettile_tlMasterClockXingIn_a_bits_source = coupler_from_rockettile_auto_tl_master_clock_xing_in_a_bits_source; // @[MixedNode.scala:551:17]
wire [31:0] coupler_from_rockettile_tlMasterClockXingIn_a_bits_address = coupler_from_rockettile_auto_tl_master_clock_xing_in_a_bits_address; // @[MixedNode.scala:551:17]
wire [7:0] coupler_from_rockettile_tlMasterClockXingIn_a_bits_mask = coupler_from_rockettile_auto_tl_master_clock_xing_in_a_bits_mask; // @[MixedNode.scala:551:17]
wire [63:0] coupler_from_rockettile_tlMasterClockXingIn_a_bits_data = coupler_from_rockettile_auto_tl_master_clock_xing_in_a_bits_data; // @[MixedNode.scala:551:17]
wire coupler_from_rockettile_tlMasterClockXingIn_a_bits_corrupt = coupler_from_rockettile_auto_tl_master_clock_xing_in_a_bits_corrupt; // @[MixedNode.scala:551:17]
wire coupler_from_rockettile_tlMasterClockXingIn_d_ready = coupler_from_rockettile_auto_tl_master_clock_xing_in_d_ready; // @[MixedNode.scala:551:17]
wire coupler_from_rockettile_tlMasterClockXingIn_d_valid; // @[MixedNode.scala:551:17]
assign auto_coupler_from_rockettile_tl_master_clock_xing_in_d_valid_0 = coupler_from_rockettile_auto_tl_master_clock_xing_in_d_valid; // @[ClockDomain.scala:14:9]
wire [2:0] coupler_from_rockettile_tlMasterClockXingIn_d_bits_opcode; // @[MixedNode.scala:551:17]
assign auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_opcode_0 = coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_opcode; // @[ClockDomain.scala:14:9]
wire [1:0] coupler_from_rockettile_tlMasterClockXingIn_d_bits_param; // @[MixedNode.scala:551:17]
assign auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_param_0 = coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_param; // @[ClockDomain.scala:14:9]
wire [3:0] coupler_from_rockettile_tlMasterClockXingIn_d_bits_size; // @[MixedNode.scala:551:17]
assign auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_size_0 = coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_size; // @[ClockDomain.scala:14:9]
wire coupler_from_rockettile_tlMasterClockXingIn_d_bits_source; // @[MixedNode.scala:551:17]
assign auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_source_0 = coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_source; // @[ClockDomain.scala:14:9]
wire coupler_from_rockettile_tlMasterClockXingIn_d_bits_sink; // @[MixedNode.scala:551:17]
assign auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_sink_0 = coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_sink; // @[ClockDomain.scala:14:9]
wire coupler_from_rockettile_tlMasterClockXingIn_d_bits_denied; // @[MixedNode.scala:551:17]
assign auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_denied_0 = coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_denied; // @[ClockDomain.scala:14:9]
wire [63:0] coupler_from_rockettile_tlMasterClockXingIn_d_bits_data; // @[MixedNode.scala:551:17]
assign auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_data_0 = coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_data; // @[ClockDomain.scala:14:9]
wire coupler_from_rockettile_tlMasterClockXingIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
assign auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_corrupt_0 = coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_corrupt; // @[ClockDomain.scala:14:9]
wire coupler_from_rockettile_tlOut_a_ready = coupler_from_rockettile_auto_tl_out_a_ready; // @[MixedNode.scala:542:17]
wire coupler_from_rockettile_tlOut_a_valid; // @[MixedNode.scala:542:17]
assign fixer_auto_anon_in_1_a_valid = coupler_from_rockettile_auto_tl_out_a_valid; // @[FIFOFixer.scala:50:9]
wire [2:0] coupler_from_rockettile_tlOut_a_bits_opcode; // @[MixedNode.scala:542:17]
assign fixer_auto_anon_in_1_a_bits_opcode = coupler_from_rockettile_auto_tl_out_a_bits_opcode; // @[FIFOFixer.scala:50:9]
wire [2:0] coupler_from_rockettile_tlOut_a_bits_param; // @[MixedNode.scala:542:17]
assign fixer_auto_anon_in_1_a_bits_param = coupler_from_rockettile_auto_tl_out_a_bits_param; // @[FIFOFixer.scala:50:9]
wire [3:0] coupler_from_rockettile_tlOut_a_bits_size; // @[MixedNode.scala:542:17]
assign fixer_auto_anon_in_1_a_bits_size = coupler_from_rockettile_auto_tl_out_a_bits_size; // @[FIFOFixer.scala:50:9]
wire coupler_from_rockettile_tlOut_a_bits_source; // @[MixedNode.scala:542:17]
assign fixer_auto_anon_in_1_a_bits_source = coupler_from_rockettile_auto_tl_out_a_bits_source; // @[FIFOFixer.scala:50:9]
wire [31:0] coupler_from_rockettile_tlOut_a_bits_address; // @[MixedNode.scala:542:17]
assign fixer_auto_anon_in_1_a_bits_address = coupler_from_rockettile_auto_tl_out_a_bits_address; // @[FIFOFixer.scala:50:9]
wire [7:0] coupler_from_rockettile_tlOut_a_bits_mask; // @[MixedNode.scala:542:17]
assign fixer_auto_anon_in_1_a_bits_mask = coupler_from_rockettile_auto_tl_out_a_bits_mask; // @[FIFOFixer.scala:50:9]
wire [63:0] coupler_from_rockettile_tlOut_a_bits_data; // @[MixedNode.scala:542:17]
assign fixer_auto_anon_in_1_a_bits_data = coupler_from_rockettile_auto_tl_out_a_bits_data; // @[FIFOFixer.scala:50:9]
wire coupler_from_rockettile_tlOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
assign fixer_auto_anon_in_1_a_bits_corrupt = coupler_from_rockettile_auto_tl_out_a_bits_corrupt; // @[FIFOFixer.scala:50:9]
wire coupler_from_rockettile_tlOut_d_ready; // @[MixedNode.scala:542:17]
assign fixer_auto_anon_in_1_d_ready = coupler_from_rockettile_auto_tl_out_d_ready; // @[FIFOFixer.scala:50:9]
wire coupler_from_rockettile_tlOut_d_valid = coupler_from_rockettile_auto_tl_out_d_valid; // @[MixedNode.scala:542:17]
wire [2:0] coupler_from_rockettile_tlOut_d_bits_opcode = coupler_from_rockettile_auto_tl_out_d_bits_opcode; // @[MixedNode.scala:542:17]
wire [1:0] coupler_from_rockettile_tlOut_d_bits_param = coupler_from_rockettile_auto_tl_out_d_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] coupler_from_rockettile_tlOut_d_bits_size = coupler_from_rockettile_auto_tl_out_d_bits_size; // @[MixedNode.scala:542:17]
wire coupler_from_rockettile_tlOut_d_bits_source = coupler_from_rockettile_auto_tl_out_d_bits_source; // @[MixedNode.scala:542:17]
wire coupler_from_rockettile_tlOut_d_bits_sink = coupler_from_rockettile_auto_tl_out_d_bits_sink; // @[MixedNode.scala:542:17]
wire coupler_from_rockettile_tlOut_d_bits_denied = coupler_from_rockettile_auto_tl_out_d_bits_denied; // @[MixedNode.scala:542:17]
wire [63:0] coupler_from_rockettile_tlOut_d_bits_data = coupler_from_rockettile_auto_tl_out_d_bits_data; // @[MixedNode.scala:542:17]
wire coupler_from_rockettile_tlOut_d_bits_corrupt = coupler_from_rockettile_auto_tl_out_d_bits_corrupt; // @[MixedNode.scala:542:17]
wire coupler_from_rockettile_tlIn_a_ready = coupler_from_rockettile_tlOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_tlIn_a_valid; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_auto_tl_out_a_valid = coupler_from_rockettile_tlOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] coupler_from_rockettile_tlIn_a_bits_opcode; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_auto_tl_out_a_bits_opcode = coupler_from_rockettile_tlOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] coupler_from_rockettile_tlIn_a_bits_param; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_auto_tl_out_a_bits_param = coupler_from_rockettile_tlOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] coupler_from_rockettile_tlIn_a_bits_size; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_auto_tl_out_a_bits_size = coupler_from_rockettile_tlOut_a_bits_size; // @[MixedNode.scala:542:17]
wire coupler_from_rockettile_tlIn_a_bits_source; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_auto_tl_out_a_bits_source = coupler_from_rockettile_tlOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] coupler_from_rockettile_tlIn_a_bits_address; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_auto_tl_out_a_bits_address = coupler_from_rockettile_tlOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] coupler_from_rockettile_tlIn_a_bits_mask; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_auto_tl_out_a_bits_mask = coupler_from_rockettile_tlOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] coupler_from_rockettile_tlIn_a_bits_data; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_auto_tl_out_a_bits_data = coupler_from_rockettile_tlOut_a_bits_data; // @[MixedNode.scala:542:17]
wire coupler_from_rockettile_tlIn_a_bits_corrupt; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_auto_tl_out_a_bits_corrupt = coupler_from_rockettile_tlOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire coupler_from_rockettile_tlIn_d_ready; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_auto_tl_out_d_ready = coupler_from_rockettile_tlOut_d_ready; // @[MixedNode.scala:542:17]
wire coupler_from_rockettile_tlIn_d_valid = coupler_from_rockettile_tlOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
wire [2:0] coupler_from_rockettile_tlIn_d_bits_opcode = coupler_from_rockettile_tlOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
wire [1:0] coupler_from_rockettile_tlIn_d_bits_param = coupler_from_rockettile_tlOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
wire [3:0] coupler_from_rockettile_tlIn_d_bits_size = coupler_from_rockettile_tlOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_tlIn_d_bits_source = coupler_from_rockettile_tlOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_tlIn_d_bits_sink = coupler_from_rockettile_tlOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_tlIn_d_bits_denied = coupler_from_rockettile_tlOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
wire [63:0] coupler_from_rockettile_tlIn_d_bits_data = coupler_from_rockettile_tlOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_tlIn_d_bits_corrupt = coupler_from_rockettile_tlOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_no_bufferOut_a_ready = coupler_from_rockettile_tlIn_a_ready; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_no_bufferOut_a_valid; // @[MixedNode.scala:542:17]
assign coupler_from_rockettile_tlOut_a_valid = coupler_from_rockettile_tlIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
wire [2:0] coupler_from_rockettile_no_bufferOut_a_bits_opcode; // @[MixedNode.scala:542:17]
assign coupler_from_rockettile_tlOut_a_bits_opcode = coupler_from_rockettile_tlIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
wire [2:0] coupler_from_rockettile_no_bufferOut_a_bits_param; // @[MixedNode.scala:542:17]
assign coupler_from_rockettile_tlOut_a_bits_param = coupler_from_rockettile_tlIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
wire [3:0] coupler_from_rockettile_no_bufferOut_a_bits_size; // @[MixedNode.scala:542:17]
assign coupler_from_rockettile_tlOut_a_bits_size = coupler_from_rockettile_tlIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_no_bufferOut_a_bits_source; // @[MixedNode.scala:542:17]
assign coupler_from_rockettile_tlOut_a_bits_source = coupler_from_rockettile_tlIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
wire [31:0] coupler_from_rockettile_no_bufferOut_a_bits_address; // @[MixedNode.scala:542:17]
assign coupler_from_rockettile_tlOut_a_bits_address = coupler_from_rockettile_tlIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
wire [7:0] coupler_from_rockettile_no_bufferOut_a_bits_mask; // @[MixedNode.scala:542:17]
assign coupler_from_rockettile_tlOut_a_bits_mask = coupler_from_rockettile_tlIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
wire [63:0] coupler_from_rockettile_no_bufferOut_a_bits_data; // @[MixedNode.scala:542:17]
assign coupler_from_rockettile_tlOut_a_bits_data = coupler_from_rockettile_tlIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_no_bufferOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
assign coupler_from_rockettile_tlOut_a_bits_corrupt = coupler_from_rockettile_tlIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_no_bufferOut_d_ready; // @[MixedNode.scala:542:17]
assign coupler_from_rockettile_tlOut_d_ready = coupler_from_rockettile_tlIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_no_bufferOut_d_valid = coupler_from_rockettile_tlIn_d_valid; // @[MixedNode.scala:542:17, :551:17]
wire [2:0] coupler_from_rockettile_no_bufferOut_d_bits_opcode = coupler_from_rockettile_tlIn_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
wire [1:0] coupler_from_rockettile_no_bufferOut_d_bits_param = coupler_from_rockettile_tlIn_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
wire [3:0] coupler_from_rockettile_no_bufferOut_d_bits_size = coupler_from_rockettile_tlIn_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_no_bufferOut_d_bits_source = coupler_from_rockettile_tlIn_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_no_bufferOut_d_bits_sink = coupler_from_rockettile_tlIn_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_no_bufferOut_d_bits_denied = coupler_from_rockettile_tlIn_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
wire [63:0] coupler_from_rockettile_no_bufferOut_d_bits_data = coupler_from_rockettile_tlIn_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_no_bufferOut_d_bits_corrupt = coupler_from_rockettile_tlIn_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_no_bufferIn_a_ready = coupler_from_rockettile_no_bufferOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_no_bufferIn_a_valid; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_tlIn_a_valid = coupler_from_rockettile_no_bufferOut_a_valid; // @[MixedNode.scala:542:17, :551:17]
wire [2:0] coupler_from_rockettile_no_bufferIn_a_bits_opcode; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_tlIn_a_bits_opcode = coupler_from_rockettile_no_bufferOut_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
wire [2:0] coupler_from_rockettile_no_bufferIn_a_bits_param; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_tlIn_a_bits_param = coupler_from_rockettile_no_bufferOut_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
wire [3:0] coupler_from_rockettile_no_bufferIn_a_bits_size; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_tlIn_a_bits_size = coupler_from_rockettile_no_bufferOut_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_no_bufferIn_a_bits_source; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_tlIn_a_bits_source = coupler_from_rockettile_no_bufferOut_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
wire [31:0] coupler_from_rockettile_no_bufferIn_a_bits_address; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_tlIn_a_bits_address = coupler_from_rockettile_no_bufferOut_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
wire [7:0] coupler_from_rockettile_no_bufferIn_a_bits_mask; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_tlIn_a_bits_mask = coupler_from_rockettile_no_bufferOut_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
wire [63:0] coupler_from_rockettile_no_bufferIn_a_bits_data; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_tlIn_a_bits_data = coupler_from_rockettile_no_bufferOut_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_no_bufferIn_a_bits_corrupt; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_tlIn_a_bits_corrupt = coupler_from_rockettile_no_bufferOut_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_no_bufferIn_d_ready; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_tlIn_d_ready = coupler_from_rockettile_no_bufferOut_d_ready; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_no_bufferIn_d_valid = coupler_from_rockettile_no_bufferOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
wire [2:0] coupler_from_rockettile_no_bufferIn_d_bits_opcode = coupler_from_rockettile_no_bufferOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
wire [1:0] coupler_from_rockettile_no_bufferIn_d_bits_param = coupler_from_rockettile_no_bufferOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
wire [3:0] coupler_from_rockettile_no_bufferIn_d_bits_size = coupler_from_rockettile_no_bufferOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_no_bufferIn_d_bits_source = coupler_from_rockettile_no_bufferOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_no_bufferIn_d_bits_sink = coupler_from_rockettile_no_bufferOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_no_bufferIn_d_bits_denied = coupler_from_rockettile_no_bufferOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
wire [63:0] coupler_from_rockettile_no_bufferIn_d_bits_data = coupler_from_rockettile_no_bufferOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_no_bufferIn_d_bits_corrupt = coupler_from_rockettile_no_bufferOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_tlMasterClockXingOut_a_ready = coupler_from_rockettile_no_bufferIn_a_ready; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_tlMasterClockXingOut_a_valid; // @[MixedNode.scala:542:17]
assign coupler_from_rockettile_no_bufferOut_a_valid = coupler_from_rockettile_no_bufferIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
wire [2:0] coupler_from_rockettile_tlMasterClockXingOut_a_bits_opcode; // @[MixedNode.scala:542:17]
assign coupler_from_rockettile_no_bufferOut_a_bits_opcode = coupler_from_rockettile_no_bufferIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
wire [2:0] coupler_from_rockettile_tlMasterClockXingOut_a_bits_param; // @[MixedNode.scala:542:17]
assign coupler_from_rockettile_no_bufferOut_a_bits_param = coupler_from_rockettile_no_bufferIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
wire [3:0] coupler_from_rockettile_tlMasterClockXingOut_a_bits_size; // @[MixedNode.scala:542:17]
assign coupler_from_rockettile_no_bufferOut_a_bits_size = coupler_from_rockettile_no_bufferIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_tlMasterClockXingOut_a_bits_source; // @[MixedNode.scala:542:17]
assign coupler_from_rockettile_no_bufferOut_a_bits_source = coupler_from_rockettile_no_bufferIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
wire [31:0] coupler_from_rockettile_tlMasterClockXingOut_a_bits_address; // @[MixedNode.scala:542:17]
assign coupler_from_rockettile_no_bufferOut_a_bits_address = coupler_from_rockettile_no_bufferIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
wire [7:0] coupler_from_rockettile_tlMasterClockXingOut_a_bits_mask; // @[MixedNode.scala:542:17]
assign coupler_from_rockettile_no_bufferOut_a_bits_mask = coupler_from_rockettile_no_bufferIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
wire [63:0] coupler_from_rockettile_tlMasterClockXingOut_a_bits_data; // @[MixedNode.scala:542:17]
assign coupler_from_rockettile_no_bufferOut_a_bits_data = coupler_from_rockettile_no_bufferIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_tlMasterClockXingOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
assign coupler_from_rockettile_no_bufferOut_a_bits_corrupt = coupler_from_rockettile_no_bufferIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_tlMasterClockXingOut_d_ready; // @[MixedNode.scala:542:17]
assign coupler_from_rockettile_no_bufferOut_d_ready = coupler_from_rockettile_no_bufferIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_tlMasterClockXingOut_d_valid = coupler_from_rockettile_no_bufferIn_d_valid; // @[MixedNode.scala:542:17, :551:17]
wire [2:0] coupler_from_rockettile_tlMasterClockXingOut_d_bits_opcode = coupler_from_rockettile_no_bufferIn_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
wire [1:0] coupler_from_rockettile_tlMasterClockXingOut_d_bits_param = coupler_from_rockettile_no_bufferIn_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
wire [3:0] coupler_from_rockettile_tlMasterClockXingOut_d_bits_size = coupler_from_rockettile_no_bufferIn_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_tlMasterClockXingOut_d_bits_source = coupler_from_rockettile_no_bufferIn_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_tlMasterClockXingOut_d_bits_sink = coupler_from_rockettile_no_bufferIn_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_tlMasterClockXingOut_d_bits_denied = coupler_from_rockettile_no_bufferIn_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
wire [63:0] coupler_from_rockettile_tlMasterClockXingOut_d_bits_data = coupler_from_rockettile_no_bufferIn_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
wire coupler_from_rockettile_tlMasterClockXingOut_d_bits_corrupt = coupler_from_rockettile_no_bufferIn_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_tlMasterClockXingIn_a_ready = coupler_from_rockettile_tlMasterClockXingOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_no_bufferIn_a_valid = coupler_from_rockettile_tlMasterClockXingOut_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_no_bufferIn_a_bits_opcode = coupler_from_rockettile_tlMasterClockXingOut_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_no_bufferIn_a_bits_param = coupler_from_rockettile_tlMasterClockXingOut_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_no_bufferIn_a_bits_size = coupler_from_rockettile_tlMasterClockXingOut_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_no_bufferIn_a_bits_source = coupler_from_rockettile_tlMasterClockXingOut_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_no_bufferIn_a_bits_address = coupler_from_rockettile_tlMasterClockXingOut_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_no_bufferIn_a_bits_mask = coupler_from_rockettile_tlMasterClockXingOut_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_no_bufferIn_a_bits_data = coupler_from_rockettile_tlMasterClockXingOut_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_no_bufferIn_a_bits_corrupt = coupler_from_rockettile_tlMasterClockXingOut_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_no_bufferIn_d_ready = coupler_from_rockettile_tlMasterClockXingOut_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_tlMasterClockXingIn_d_valid = coupler_from_rockettile_tlMasterClockXingOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_tlMasterClockXingIn_d_bits_opcode = coupler_from_rockettile_tlMasterClockXingOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_tlMasterClockXingIn_d_bits_param = coupler_from_rockettile_tlMasterClockXingOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_tlMasterClockXingIn_d_bits_size = coupler_from_rockettile_tlMasterClockXingOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_tlMasterClockXingIn_d_bits_source = coupler_from_rockettile_tlMasterClockXingOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_tlMasterClockXingIn_d_bits_sink = coupler_from_rockettile_tlMasterClockXingOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_tlMasterClockXingIn_d_bits_denied = coupler_from_rockettile_tlMasterClockXingOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_tlMasterClockXingIn_d_bits_data = coupler_from_rockettile_tlMasterClockXingOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_tlMasterClockXingIn_d_bits_corrupt = coupler_from_rockettile_tlMasterClockXingOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_auto_tl_master_clock_xing_in_a_ready = coupler_from_rockettile_tlMasterClockXingIn_a_ready; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_tlMasterClockXingOut_a_valid = coupler_from_rockettile_tlMasterClockXingIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_tlMasterClockXingOut_a_bits_opcode = coupler_from_rockettile_tlMasterClockXingIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_tlMasterClockXingOut_a_bits_param = coupler_from_rockettile_tlMasterClockXingIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_tlMasterClockXingOut_a_bits_size = coupler_from_rockettile_tlMasterClockXingIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_tlMasterClockXingOut_a_bits_source = coupler_from_rockettile_tlMasterClockXingIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_tlMasterClockXingOut_a_bits_address = coupler_from_rockettile_tlMasterClockXingIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_tlMasterClockXingOut_a_bits_mask = coupler_from_rockettile_tlMasterClockXingIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_tlMasterClockXingOut_a_bits_data = coupler_from_rockettile_tlMasterClockXingIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_tlMasterClockXingOut_a_bits_corrupt = coupler_from_rockettile_tlMasterClockXingIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_tlMasterClockXingOut_d_ready = coupler_from_rockettile_tlMasterClockXingIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign coupler_from_rockettile_auto_tl_master_clock_xing_in_d_valid = coupler_from_rockettile_tlMasterClockXingIn_d_valid; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_opcode = coupler_from_rockettile_tlMasterClockXingIn_d_bits_opcode; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_param = coupler_from_rockettile_tlMasterClockXingIn_d_bits_param; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_size = coupler_from_rockettile_tlMasterClockXingIn_d_bits_size; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_source = coupler_from_rockettile_tlMasterClockXingIn_d_bits_source; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_sink = coupler_from_rockettile_tlMasterClockXingIn_d_bits_sink; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_denied = coupler_from_rockettile_tlMasterClockXingIn_d_bits_denied; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_data = coupler_from_rockettile_tlMasterClockXingIn_d_bits_data; // @[MixedNode.scala:551:17]
assign coupler_from_rockettile_auto_tl_master_clock_xing_in_d_bits_corrupt = coupler_from_rockettile_tlMasterClockXingIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
assign childClock = clockSinkNodeIn_clock; // @[MixedNode.scala:551:17]
assign childReset = clockSinkNodeIn_reset; // @[MixedNode.scala:551:17]
wire fixer__T_1 = fixer_a_first & fixer__a_first_T; // @[Decoupled.scala:51:35]
wire fixer__T_3 = fixer_d_first & fixer__T_2; // @[Decoupled.scala:51:35]
wire fixer__T_31 = fixer_a_first_1 & fixer__a_first_T_1; // @[Decoupled.scala:51:35]
wire fixer__T_33 = fixer_d_first_1 & fixer__T_32; // @[Decoupled.scala:51:35]
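  // Added explanatory comment (not part of the original generated output): the
  // sequential block below is the FIFOFixer bookkeeping for both TileLink
  // ports. On reset it clears the beat counters and the per-source "flight"
  // bits; otherwise it sets flight_N when the first beat of an A-channel
  // request from source N fires, clears it when the matching first D-channel
  // beat returns, and updates the SourceIdFIFOed masks used to enforce FIFO
  // ordering.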
always @(posedge childClock) begin // @[LazyModuleImp.scala:155:31]
if (childReset) begin // @[LazyModuleImp.scala:155:31, :158:31]
fixer_a_first_counter <= 9'h0; // @[Edges.scala:229:27]
fixer_d_first_counter <= 9'h0; // @[Edges.scala:229:27]
fixer_flight_0 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_1 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_2 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_3 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_4 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_5 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_6 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_7 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_8 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_9 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_10 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_11 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_12 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_13 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_14 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_15 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_16 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_SourceIdFIFOed <= 17'h0; // @[FIFOFixer.scala:115:35]
fixer_a_first_counter_1 <= 9'h0; // @[Edges.scala:229:27]
fixer_d_first_counter_1 <= 9'h0; // @[Edges.scala:229:27]
fixer_flight_1_0 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_flight_1_1 <= 1'h0; // @[FIFOFixer.scala:79:27]
fixer_SourceIdFIFOed_1 <= 2'h0; // @[FIFOFixer.scala:115:35]
end
else begin // @[LazyModuleImp.scala:155:31]
if (fixer__a_first_T) // @[Decoupled.scala:51:35]
fixer_a_first_counter <= fixer__a_first_counter_T; // @[Edges.scala:229:27, :236:21]
if (fixer__d_first_T) // @[Decoupled.scala:51:35]
fixer_d_first_counter <= fixer__d_first_counter_T; // @[Edges.scala:229:27, :236:21]
fixer_flight_0 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h0) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h0 | fixer_flight_0); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_1 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h1) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h1 | fixer_flight_1); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_2 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h2) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h2 | fixer_flight_2); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_3 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h3) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h3 | fixer_flight_3); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_4 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h4) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h4 | fixer_flight_4); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_5 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h5) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h5 | fixer_flight_5); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_6 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h6) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h6 | fixer_flight_6); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_7 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h7) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h7 | fixer_flight_7); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_8 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h8) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h8 | fixer_flight_8); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_9 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h9) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h9 | fixer_flight_9); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_10 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'hA) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'hA | fixer_flight_10); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_11 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'hB) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'hB | fixer_flight_11); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_12 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'hC) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'hC | fixer_flight_12); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_13 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'hD) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'hD | fixer_flight_13); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_14 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'hE) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'hE | fixer_flight_14); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_15 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'hF) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'hF | fixer_flight_15); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_16 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 5'h10) & (fixer__T_1 & fixer_anonIn_a_bits_source == 5'h10 | fixer_flight_16); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_SourceIdFIFOed <= fixer__SourceIdFIFOed_T; // @[FIFOFixer.scala:115:35, :126:40]
if (fixer__a_first_T_1) // @[Decoupled.scala:51:35]
fixer_a_first_counter_1 <= fixer__a_first_counter_T_1; // @[Edges.scala:229:27, :236:21]
if (fixer__d_first_T_2) // @[Decoupled.scala:51:35]
fixer_d_first_counter_1 <= fixer__d_first_counter_T_1; // @[Edges.scala:229:27, :236:21]
fixer_flight_1_0 <= ~(fixer__T_33 & ~fixer_x1_anonIn_d_bits_source) & (fixer__T_31 & ~fixer_x1_anonIn_a_bits_source | fixer_flight_1_0); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_flight_1_1 <= ~(fixer__T_33 & fixer_x1_anonIn_d_bits_source) & (fixer__T_31 & fixer_x1_anonIn_a_bits_source | fixer_flight_1_1); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}]
fixer_SourceIdFIFOed_1 <= fixer__SourceIdFIFOed_T_1; // @[FIFOFixer.scala:115:35, :126:40]
end
  end // always @(posedge)
FixedClockBroadcast_4 fixedClockNode ( // @[ClockGroup.scala:115:114]
.auto_anon_in_clock (clockGroup_auto_out_clock), // @[ClockGroup.scala:24:9]
.auto_anon_in_reset (clockGroup_auto_out_reset), // @[ClockGroup.scala:24:9]
.auto_anon_out_3_clock (auto_fixedClockNode_anon_out_2_clock_0),
.auto_anon_out_3_reset (auto_fixedClockNode_anon_out_2_reset_0),
.auto_anon_out_2_clock (auto_fixedClockNode_anon_out_1_clock_0),
.auto_anon_out_2_reset (auto_fixedClockNode_anon_out_1_reset_0),
.auto_anon_out_1_clock (auto_fixedClockNode_anon_out_0_clock_0),
.auto_anon_out_1_reset (auto_fixedClockNode_anon_out_0_reset_0),
.auto_anon_out_0_clock (clockSinkNodeIn_clock),
.auto_anon_out_0_reset (clockSinkNodeIn_reset)
); // @[ClockGroup.scala:115:114]
TLXbar_sbus_i2_o1_a32d64s6k1z4u system_bus_xbar ( // @[SystemBus.scala:47:43]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset), // @[LazyModuleImp.scala:158:31]
.auto_anon_in_1_a_ready (fixer_auto_anon_out_1_a_ready),
.auto_anon_in_1_a_valid (fixer_auto_anon_out_1_a_valid), // @[FIFOFixer.scala:50:9]
.auto_anon_in_1_a_bits_opcode (fixer_auto_anon_out_1_a_bits_opcode), // @[FIFOFixer.scala:50:9]
.auto_anon_in_1_a_bits_param (fixer_auto_anon_out_1_a_bits_param), // @[FIFOFixer.scala:50:9]
.auto_anon_in_1_a_bits_size (fixer_auto_anon_out_1_a_bits_size), // @[FIFOFixer.scala:50:9]
.auto_anon_in_1_a_bits_source (fixer_auto_anon_out_1_a_bits_source), // @[FIFOFixer.scala:50:9]
.auto_anon_in_1_a_bits_address (fixer_auto_anon_out_1_a_bits_address), // @[FIFOFixer.scala:50:9]
.auto_anon_in_1_a_bits_mask (fixer_auto_anon_out_1_a_bits_mask), // @[FIFOFixer.scala:50:9]
.auto_anon_in_1_a_bits_data (fixer_auto_anon_out_1_a_bits_data), // @[FIFOFixer.scala:50:9]
.auto_anon_in_1_a_bits_corrupt (fixer_auto_anon_out_1_a_bits_corrupt), // @[FIFOFixer.scala:50:9]
.auto_anon_in_1_d_ready (fixer_auto_anon_out_1_d_ready), // @[FIFOFixer.scala:50:9]
.auto_anon_in_1_d_valid (fixer_auto_anon_out_1_d_valid),
.auto_anon_in_1_d_bits_opcode (fixer_auto_anon_out_1_d_bits_opcode),
.auto_anon_in_1_d_bits_param (fixer_auto_anon_out_1_d_bits_param),
.auto_anon_in_1_d_bits_size (fixer_auto_anon_out_1_d_bits_size),
.auto_anon_in_1_d_bits_source (fixer_auto_anon_out_1_d_bits_source),
.auto_anon_in_1_d_bits_sink (fixer_auto_anon_out_1_d_bits_sink),
.auto_anon_in_1_d_bits_denied (fixer_auto_anon_out_1_d_bits_denied),
.auto_anon_in_1_d_bits_data (fixer_auto_anon_out_1_d_bits_data),
.auto_anon_in_1_d_bits_corrupt (fixer_auto_anon_out_1_d_bits_corrupt),
.auto_anon_in_0_a_ready (fixer_auto_anon_out_0_a_ready),
.auto_anon_in_0_a_valid (fixer_auto_anon_out_0_a_valid), // @[FIFOFixer.scala:50:9]
.auto_anon_in_0_a_bits_opcode (fixer_auto_anon_out_0_a_bits_opcode), // @[FIFOFixer.scala:50:9]
.auto_anon_in_0_a_bits_param (fixer_auto_anon_out_0_a_bits_param), // @[FIFOFixer.scala:50:9]
.auto_anon_in_0_a_bits_size (fixer_auto_anon_out_0_a_bits_size), // @[FIFOFixer.scala:50:9]
.auto_anon_in_0_a_bits_source (fixer_auto_anon_out_0_a_bits_source), // @[FIFOFixer.scala:50:9]
.auto_anon_in_0_a_bits_address (fixer_auto_anon_out_0_a_bits_address), // @[FIFOFixer.scala:50:9]
.auto_anon_in_0_a_bits_mask (fixer_auto_anon_out_0_a_bits_mask), // @[FIFOFixer.scala:50:9]
.auto_anon_in_0_a_bits_data (fixer_auto_anon_out_0_a_bits_data), // @[FIFOFixer.scala:50:9]
.auto_anon_in_0_a_bits_corrupt (fixer_auto_anon_out_0_a_bits_corrupt), // @[FIFOFixer.scala:50:9]
.auto_anon_in_0_d_ready (fixer_auto_anon_out_0_d_ready), // @[FIFOFixer.scala:50:9]
.auto_anon_in_0_d_valid (fixer_auto_anon_out_0_d_valid),
.auto_anon_in_0_d_bits_opcode (fixer_auto_anon_out_0_d_bits_opcode),
.auto_anon_in_0_d_bits_param (fixer_auto_anon_out_0_d_bits_param),
.auto_anon_in_0_d_bits_size (fixer_auto_anon_out_0_d_bits_size),
.auto_anon_in_0_d_bits_source (fixer_auto_anon_out_0_d_bits_source),
.auto_anon_in_0_d_bits_sink (fixer_auto_anon_out_0_d_bits_sink),
.auto_anon_in_0_d_bits_denied (fixer_auto_anon_out_0_d_bits_denied),
.auto_anon_in_0_d_bits_data (fixer_auto_anon_out_0_d_bits_data),
.auto_anon_in_0_d_bits_corrupt (fixer_auto_anon_out_0_d_bits_corrupt),
.auto_anon_out_a_ready (coupler_to_bus_named_cbus_auto_widget_anon_in_a_ready), // @[LazyModuleImp.scala:138:7]
.auto_anon_out_a_valid (coupler_to_bus_named_cbus_auto_widget_anon_in_a_valid),
.auto_anon_out_a_bits_opcode (coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_opcode),
.auto_anon_out_a_bits_param (coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_param),
.auto_anon_out_a_bits_size (coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_size),
.auto_anon_out_a_bits_source (coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_source),
.auto_anon_out_a_bits_address (coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_address),
.auto_anon_out_a_bits_mask (coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_mask),
.auto_anon_out_a_bits_data (coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_data),
.auto_anon_out_a_bits_corrupt (coupler_to_bus_named_cbus_auto_widget_anon_in_a_bits_corrupt),
.auto_anon_out_d_ready (coupler_to_bus_named_cbus_auto_widget_anon_in_d_ready),
.auto_anon_out_d_valid (coupler_to_bus_named_cbus_auto_widget_anon_in_d_valid), // @[LazyModuleImp.scala:138:7]
.auto_anon_out_d_bits_opcode (coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_opcode), // @[LazyModuleImp.scala:138:7]
.auto_anon_out_d_bits_param (coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_param), // @[LazyModuleImp.scala:138:7]
.auto_anon_out_d_bits_size (coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_size), // @[LazyModuleImp.scala:138:7]
.auto_anon_out_d_bits_source (coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_source), // @[LazyModuleImp.scala:138:7]
.auto_anon_out_d_bits_sink (coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_sink), // @[LazyModuleImp.scala:138:7]
.auto_anon_out_d_bits_denied (coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_denied), // @[LazyModuleImp.scala:138:7]
.auto_anon_out_d_bits_data (coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_data), // @[LazyModuleImp.scala:138:7]
.auto_anon_out_d_bits_corrupt (coupler_to_bus_named_cbus_auto_widget_anon_in_d_bits_corrupt) // @[LazyModuleImp.scala:138:7]
); // @[SystemBus.scala:47:43]
TLInterconnectCoupler_sbus_to_memory_controller_port_named_tl_mem coupler_to_memory_controller_port_named_tl_mem ( // @[LazyScope.scala:98:27]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset) // @[LazyModuleImp.scala:158:31]
); // @[LazyScope.scala:98:27]
assign auto_coupler_from_rockettile_tl_master_clock_xing_in_a_ready = auto_coupler_from_rockettile_tl_master_clock_xing_in_a_ready_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_from_rockettile_tl_master_clock_xing_in_d_valid = auto_coupler_from_rockettile_tl_master_clock_xing_in_d_valid_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_opcode = auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_opcode_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_param = auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_param_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_size = auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_size_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_source = auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_source_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_sink = auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_sink_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_denied = auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_denied_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_data = auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_data_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_corrupt = auto_coupler_from_rockettile_tl_master_clock_xing_in_d_bits_corrupt_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_from_bus_named_fbus_bus_xing_in_a_ready = auto_coupler_from_bus_named_fbus_bus_xing_in_a_ready_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_from_bus_named_fbus_bus_xing_in_d_valid = auto_coupler_from_bus_named_fbus_bus_xing_in_d_valid_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_opcode = auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_opcode_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_param = auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_param_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_size = auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_size_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_source = auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_source_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_sink = auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_sink_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_denied = auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_denied_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_data = auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_data_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_corrupt = auto_coupler_from_bus_named_fbus_bus_xing_in_d_bits_corrupt_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_bus_named_cbus_bus_xing_out_a_valid = auto_coupler_to_bus_named_cbus_bus_xing_out_a_valid_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_opcode = auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_opcode_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_param = auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_param_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_size = auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_size_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_source = auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_source_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_address = auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_address_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_mask = auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_mask_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_data = auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_data_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_corrupt = auto_coupler_to_bus_named_cbus_bus_xing_out_a_bits_corrupt_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_bus_named_cbus_bus_xing_out_d_ready = auto_coupler_to_bus_named_cbus_bus_xing_out_d_ready_0; // @[ClockDomain.scala:14:9]
assign auto_fixedClockNode_anon_out_2_clock = auto_fixedClockNode_anon_out_2_clock_0; // @[ClockDomain.scala:14:9]
assign auto_fixedClockNode_anon_out_2_reset = auto_fixedClockNode_anon_out_2_reset_0; // @[ClockDomain.scala:14:9]
assign auto_fixedClockNode_anon_out_1_clock = auto_fixedClockNode_anon_out_1_clock_0; // @[ClockDomain.scala:14:9]
assign auto_fixedClockNode_anon_out_1_reset = auto_fixedClockNode_anon_out_1_reset_0; // @[ClockDomain.scala:14:9]
assign auto_fixedClockNode_anon_out_0_clock = auto_fixedClockNode_anon_out_0_clock_0; // @[ClockDomain.scala:14:9]
assign auto_fixedClockNode_anon_out_0_reset = auto_fixedClockNode_anon_out_0_reset_0; // @[ClockDomain.scala:14:9]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
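
// Hedged usage sketch (added for illustration; `ShiftRegInitExample` and its
// port names are hypothetical, not part of the upstream file): a 3-deep,
// zero-initialized delay line on an 8-bit signal whose stages are suggested
// the names "sync_0" .. "sync_2".
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val din  = Input(UInt(8.W))
    val dout = Output(UInt(8.W))
  })
  io.dout := ShiftRegInit(io.din, n = 3, init = 0.U(8.W), name = Some("sync"))
}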
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
  * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
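
// Hedged usage sketch (added for illustration; `AsyncResetShiftRegExample`
// and its port names are hypothetical): pipe a single-bit input through a
// 2-deep, zero-initialized, asynchronously reset register chain.
class AsyncResetShiftRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  io.q := AsyncResetShiftReg(io.d, depth = 2, init = 0, name = Some("pipe"))
}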
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
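
// Hedged parameterization sketch (added for illustration; the object name is
// hypothetical): a deep, reset-safe crossing uses the default constructor,
// while a one-entry crossing uses the `singleton` helper, which fixes
// `narrow = false` since a single-entry queue has nothing to mux.
object AsyncQueueParamsExamples {
  val deep     = AsyncQueueParams(depth = 8, sync = 3, safe = true) // 3-bit Gray pointers
  val oneEntry = AsyncQueueParams.singleton(sync = 3)               // depth 1, narrow = false
}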
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
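// Worked example: the last line converts the binary count to Gray code as bin ^ (bin >> 1),
// so for a 3-bit counter the sequence is
//   binary: 000 001 010 011 100 101 110 111
//   gray:   000 001 011 010 110 111 101 100
// Exactly one bit changes per increment, which is what makes the read/write pointers safe
// to synchronize bit-by-bit across clock domains in AsyncQueueSource/AsyncQueueSink below.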
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
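// The comparison below is the standard Gray-code full test: the queue is full when the
// write pointer equals the read pointer with its two most-significant bits inverted, and
// (depth | depth >> 1) is exactly that mask, e.g. 0b1100 for depth = 8.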
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because dequeue can occur on the receiving side,
// then a reset is allowed to happen, but the write side cannot know that the dequeue
// occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that the bits latch when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
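// Illustrative usage sketch: crossing a Decoupled stream of 32-bit words from this
// wrapper's implicit clock domain into a second domain. The AsyncQueueExample module,
// its ports, and the payload width are assumptions for illustration; the CrossingIO
// field names are the ones already referenced by AsyncQueue above.
class AsyncQueueExample extends Module { // implicit clock/reset drive the enqueue side
  val io = IO(new Bundle {
    val deq_clock = Input(Clock())
    val deq_reset = Input(Bool())
    val enq = Flipped(Decoupled(UInt(32.W)))
    val deq = Decoupled(UInt(32.W))
  })
  val crossing = Module(new AsyncQueue(UInt(32.W), AsyncQueueParams(depth = 8, sync = 3)))
  // Enqueue side runs on this module's clock/reset; dequeue side on the supplied ones.
  crossing.io.enq_clock := clock
  crossing.io.enq_reset := reset.asBool
  crossing.io.deq_clock := io.deq_clock
  crossing.io.deq_reset := io.deq_reset
  crossing.io.enq <> io.enq
  io.deq <> crossing.io.deq
}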
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
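// Illustrative sketch: the usual pattern is for a LazyModule subclass to provide its
// hardware through one of the Imp classes above. The ExampleAdder name and ports below
// are assumptions for illustration only.
//
//   class ExampleAdder(implicit p: Parameters) extends LazyModule {
//     lazy val module = new LazyModuleImp(this) {
//       val io = IO(new Bundle {
//         val a   = Input(UInt(8.W))
//         val b   = Input(UInt(8.W))
//         val sum = Output(UInt(9.W))
//       })
//       io.sum := io.a +& io.b
//     }
//   }
//
// The wrapper is created during diplomacy as LazyModule(new ExampleAdder), and the Chisel
// hardware above is only elaborated once .module is first referenced.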
File Debug.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.devices.debug
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.amba.apb.{APBFanout, APBToTL}
import freechips.rocketchip.devices.debug.systembusaccess.{SBToTL, SystemBusAccessModule}
import freechips.rocketchip.devices.tilelink.{DevNullParams, TLBusBypass, TLError}
import freechips.rocketchip.diplomacy.{AddressSet, BufferParams}
import freechips.rocketchip.resources.{Description, Device, Resource, ResourceBindings, ResourceString, SimpleDevice}
import freechips.rocketchip.interrupts.{IntNexusNode, IntSinkParameters, IntSinkPortParameters, IntSourceParameters, IntSourcePortParameters, IntSyncCrossingSource, IntSyncIdentityNode}
import freechips.rocketchip.regmapper.{RegField, RegFieldAccessType, RegFieldDesc, RegFieldGroup, RegFieldWrType, RegReadFn, RegWriteFn}
import freechips.rocketchip.rocket.{CSRs, Instructions}
import freechips.rocketchip.tile.MaxHartIdBits
import freechips.rocketchip.tilelink.{TLAsyncCrossingSink, TLAsyncCrossingSource, TLBuffer, TLRegisterNode, TLXbar}
import freechips.rocketchip.util.{Annotated, AsyncBundle, AsyncQueueParams, AsyncResetSynchronizerShiftReg, FromAsyncBundle, ParameterizedBundle, ResetSynchronizerShiftReg, ToAsyncBundle}
import freechips.rocketchip.util.SeqBoolBitwiseOps
import freechips.rocketchip.util.SeqToAugmentedSeq
import freechips.rocketchip.util.BooleanToAugmentedBoolean
object DsbBusConsts {
def sbAddrWidth = 12
def sbIdWidth = 10
}
object DsbRegAddrs{
// These are used by the ROM.
def HALTED = 0x100
def GOING = 0x104
def RESUMING = 0x108
def EXCEPTION = 0x10C
def WHERETO = 0x300
// This needs to be aligned for up to lq/sq
// This shows up in HartInfo, and needs to be aligned
// to enable up to LQ/SQ instructions.
def DATA = 0x380
// We want DATA to immediately follow PROGBUF so that we can
// use them interchangeably. Leave another slot if there is an
// implicit ebreak.
def PROGBUF(cfg:DebugModuleParams) = {
val tmp = DATA - (cfg.nProgramBufferWords * 4)
if (cfg.hasImplicitEbreak) (tmp - 4) else tmp
}
// This is unused if hasImplicitEbreak is false, and just points to the end of the PROGBUF.
def IMPEBREAK(cfg: DebugModuleParams) = { DATA - 4 }
// We want abstract to be immediately before PROGBUF
// because we auto-generate 2 (or 5) instructions.
def ABSTRACT(cfg:DebugModuleParams) = PROGBUF(cfg) - (cfg.nAbstractInstructions * 4)
def FLAGS = 0x400
def ROMBASE = 0x800
}
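// Worked example: with the default DebugModuleParams defined below (nProgramBufferWords = 16,
// hasImplicitEbreak = false, base address 0 so nAbstractInstructions = 2), the layout
// beneath DATA works out to
//   PROGBUF  = 0x380 - 16*4 = 0x340
//   ABSTRACT = 0x340 -  2*4 = 0x338
// i.e. the auto-generated abstract instructions sit immediately before the program buffer,
// which in turn ends immediately before DATA at 0x380.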
/** Enumerations used both in the hardware
* and in the configuration specification.
*/
object DebugModuleAccessType extends scala.Enumeration {
type DebugModuleAccessType = Value
val Access8Bit, Access16Bit, Access32Bit, Access64Bit, Access128Bit = Value
}
object DebugAbstractCommandError extends scala.Enumeration {
type DebugAbstractCommandError = Value
val Success, ErrBusy, ErrNotSupported, ErrException, ErrHaltResume = Value
}
object DebugAbstractCommandType extends scala.Enumeration {
type DebugAbstractCommandType = Value
val AccessRegister, QuickAccess = Value
}
/** Parameters exposed to the top-level design, set based on
* external requirements, etc.
*
* This object checks that the parameters conform to the
* full specification. The implementation which receives this
* object can perform more checks on what that implementation
* actually supports.
* @param nComponents Number of components to support debugging.
* @param baseAddress Base offset for debugEntry and debugException
* @param nDMIAddrSize Size of the Debug Bus Address
* @param nAbstractDataWords Number of 32-bit words for Abstract Commands
* @param nProgramBufferWords Number of 32-bit words for Program Buffer
* @param hasBusMaster Whether or not a bus master should be included
* @param clockGate Whether or not to use dmactive as the clockgate for debug module
* @param maxSupportedSBAccess Maximum transaction size supported by System Bus Access logic.
* @param supportQuickAccess Whether or not to support the quick access command.
* @param supportHartArray Whether or not to implement the hart array register (if >1 hart).
* @param nHaltGroups Number of halt groups
* @param nExtTriggers Number of external triggers
* @param hasHartResets Feature to reset all the currently selected harts
* @param hasImplicitEbreak There is an additional RO program buffer word containing an ebreak
* @param crossingHasSafeReset Include "safe" logic in Async Crossings so that only one side needs to be reset.
*/
case class DebugModuleParams (
baseAddress : BigInt = BigInt(0),
nDMIAddrSize : Int = 7,
nProgramBufferWords: Int = 16,
nAbstractDataWords : Int = 4,
nScratch : Int = 1,
hasBusMaster : Boolean = false,
clockGate : Boolean = true,
maxSupportedSBAccess : Int = 32,
supportQuickAccess : Boolean = false,
supportHartArray : Boolean = true,
nHaltGroups : Int = 1,
nExtTriggers : Int = 0,
hasHartResets : Boolean = false,
hasImplicitEbreak : Boolean = false,
hasAuthentication : Boolean = false,
crossingHasSafeReset : Boolean = true
) {
require ((nDMIAddrSize >= 7) && (nDMIAddrSize <= 32), s"Legal DMIAddrSize is 7-32, not ${nDMIAddrSize}")
require ((nAbstractDataWords > 0) && (nAbstractDataWords <= 16), s"Legal nAbstractDataWords is 1-16, not ${nAbstractDataWords}")
require ((nProgramBufferWords >= 0) && (nProgramBufferWords <= 16), s"Legal nProgramBufferWords is 0-16, not ${nProgramBufferWords}")
require (nHaltGroups < 32, s"Legal nHaltGroups is 0-31, not ${nHaltGroups}")
require (nExtTriggers <= 16, s"Legal nExtTriggers is 0-16, not ${nExtTriggers}")
if (supportQuickAccess) {
// TODO: Check that quick access requirements are met.
}
def address = AddressSet(baseAddress, 0xFFF)
/** whether the DM base address is zero */
def atzero = (baseAddress == 0)
/** The number of generated instructions
*
* When the base address is not zero, we need more instructions (and also
* more dscratch registers) to load/store the memory-mapped data registers,
* because they may no longer be directly addressable with x0 + a 12-bit immediate
*/
def nAbstractInstructions = if (atzero) 2 else 5
def debugEntry: BigInt = baseAddress + 0x800
def debugException: BigInt = baseAddress + 0x808
def nDscratch: Int = if (atzero) 1 else 2
}
object DefaultDebugModuleParams {
def apply(xlen:Int /*TODO , val configStringAddr: Int*/): DebugModuleParams = {
new DebugModuleParams().copy(
nAbstractDataWords = (if (xlen == 32) 1 else if (xlen == 64) 2 else 4),
maxSupportedSBAccess = xlen
)
}
}
case object DebugModuleKey extends Field[Option[DebugModuleParams]](Some(DebugModuleParams()))
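// Illustrative configuration sketch: a hypothetical CDE Config fragment that enables the
// system-bus master and two external triggers. The fragment's class name is an assumption;
// DebugModuleKey and the copied fields are the ones defined above.
class WithDebugBusMasterAndTriggers extends Config((site, here, up) => {
  case DebugModuleKey => up(DebugModuleKey).map(_.copy(
    hasBusMaster = true,
    nExtTriggers = 2))
})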
/** Functional parameters exposed to the design configuration.
*
* hartIdToHartSel: For systems where hart ids are not 1:1 with hartsel, provide the mapping.
* hartSelToHartId: Provide inverse mapping of the above
*/
case class DebugModuleHartSelFuncs (
hartIdToHartSel : (UInt) => UInt = (x:UInt) => x,
hartSelToHartId : (UInt) => UInt = (x:UInt) => x
)
case object DebugModuleHartSelKey extends Field(DebugModuleHartSelFuncs())
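// Illustrative sketch: for a hypothetical platform whose hart ids are spaced two apart
// (0, 2, 4, ...), hartsel can be kept dense by halving the id on the way in and doubling
// it on the way out. The ExampleSparseHartSelFuncs name is an assumption for illustration;
// a config would install it by overriding DebugModuleHartSelKey.
object ExampleSparseHartSelFuncs {
  def apply(): DebugModuleHartSelFuncs = DebugModuleHartSelFuncs(
    hartIdToHartSel = (id: UInt) => id >> 1, // e.g. hart id 4 -> hartsel 2
    hartSelToHartId = (sel: UInt) => sel << 1 // e.g. hartsel 2 -> hart id 4
  )
}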
class DebugExtTriggerOut (val nExtTriggers: Int) extends Bundle {
val req = Output(UInt(nExtTriggers.W))
val ack = Input(UInt(nExtTriggers.W))
}
class DebugExtTriggerIn (val nExtTriggers: Int) extends Bundle {
val req = Input(UInt(nExtTriggers.W))
val ack = Output(UInt(nExtTriggers.W))
}
class DebugExtTriggerIO () (implicit val p: Parameters) extends ParameterizedBundle()(p) {
val out = new DebugExtTriggerOut(p(DebugModuleKey).get.nExtTriggers)
val in = new DebugExtTriggerIn (p(DebugModuleKey).get.nExtTriggers)
}
class DebugAuthenticationIO () (implicit val p: Parameters) extends ParameterizedBundle()(p) {
val dmactive = Output(Bool())
val dmAuthWrite = Output(Bool())
val dmAuthRead = Output(Bool())
val dmAuthWdata = Output(UInt(32.W))
val dmAuthBusy = Input(Bool())
val dmAuthRdata = Input(UInt(32.W))
val dmAuthenticated = Input(Bool())
}
// *****************************************
// Module Interfaces
//
// *****************************************
/** Control signals for Inner, generated in Outer
* {{{
* run control: resumereq, ackhavereset, halt-on-reset mask
* hart select: hasel, hartsel and the hart array mask
* }}}
*/
class DebugInternalBundle (val nComponents: Int)(implicit val p: Parameters) extends ParameterizedBundle()(p) {
/** resume request */
val resumereq = Bool()
/** hart select */
val hartsel = UInt(10.W)
/** reset acknowledge */
val ackhavereset = Bool()
/** hart array enable */
val hasel = Bool()
/** hart array mask */
val hamask = Vec(nComponents, Bool())
/** halt-on-reset mask */
val hrmask = Vec(nComponents, Bool())
}
/** structure for top-level Debug Module signals which aren't the bus interfaces. */
class DebugCtrlBundle (nComponents: Int)(implicit val p: Parameters) extends ParameterizedBundle()(p) {
/** debug availability status for all harts */
val debugUnavail = Input(Vec(nComponents, Bool()))
/** reset signal
*
* for every part of the hardware platform,
* including every hart, except for the DM and any
* logic required to access the DM
*/
val ndreset = Output(Bool())
/** reset signal for the DM itself */
val dmactive = Output(Bool())
/** dmactive acknowledge */
val dmactiveAck = Input(Bool())
}
// *****************************************
// Debug Module
//
// *****************************************
/** Parameterized version of the Debug Module defined in the
* RISC-V Debug Specification
*
* DebugModule is a slave to two asynchronous masters:
* The Debug Bus (DMI) -- This is driven by an external debugger
*
* The System Bus -- This services requests from the cores. Generally
* this interface should only be active at the request
* of the debugger, but the Debug Module may also
* provide the default MTVEC since it is mapped
* to address 0x0.
*
* DebugModule is responsible for control registers and RAM, and
* Debug ROM. It runs partially off of the dmiClk (e.g. TCK) and
* the TL clock. Therefore, it is divided into an "Outer" portion (running
* off dmiClock and dmiReset) and "Inner" (running off tl_clock and tl_reset).
* This allows DMCONTROL.haltreq, hartsel, hasel, hawindowsel, hawindow, dmactive,
* and ndreset to be modified even while the Core is in reset or not being clocked.
* Not all reads from the Debugger to the Debug Module will actually complete
* in these scenarios either, they will just block until tl_clock and tl_reset
* allow them to complete. This is not strictly necessary for
* proper debugger functionality.
*/
// Local reg mapper function : Notify when written, but give the value as well.
object WNotifyWire {
def apply(n: Int, value: UInt, set: Bool, name: String, desc: String) : RegField = {
RegField(n, 0.U, RegWriteFn((valid, data) => {
set := valid
value := data
true.B
}), Some(RegFieldDesc(name = name, desc = desc,
access = RegFieldAccessType.W)))
}
}
// Local reg mapper function : Notify when accessed either as read or write.
object RWNotify {
def apply (n: Int, rVal: UInt, wVal: UInt, rNotify: Bool, wNotify: Bool, desc: Option[RegFieldDesc] = None): RegField = {
RegField(n,
RegReadFn ((ready) => {rNotify := ready ; (true.B, rVal)}),
RegWriteFn((valid, data) => {
wNotify := valid
when (valid) {wVal := data}
true.B
}
), desc)
}
}
// Local reg mapper function : Notify with value when written, take read input as presented.
// This allows checking or correcting the write value before storing it in the register field.
object WNotifyVal {
def apply(n: Int, rVal: UInt, wVal: UInt, wNotify: Bool, desc: RegFieldDesc): RegField = {
RegField(n, rVal, RegWriteFn((valid, data) => {
wNotify := valid
wVal := data
true.B
}
), desc)
}
}
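// Illustrative note: WNotifyVal is meant to be used inside a regmap entry so that a
// debugger write both presents the written value and pulses a write-enable strobe,
// e.g. (the names below are assumptions):
//   WNotifyVal(1, myFieldReg, myFieldWrData, myFieldWrEn,
//     RegFieldDesc("myfield", "example field", reset = Some(0)))
// Concrete uses appear below in the DMCONTROL and other DMI register field definitions.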
class TLDebugModuleOuter(device: Device)(implicit p: Parameters) extends LazyModule {
// For Shorter Register Names
import DMI_RegAddrs._
val cfg = p(DebugModuleKey).get
val intnode = IntNexusNode(
sourceFn = { _ => IntSourcePortParameters(Seq(IntSourceParameters(1, Seq(Resource(device, "int"))))) },
sinkFn = { _ => IntSinkPortParameters(Seq(IntSinkParameters())) },
outputRequiresInput = false)
val dmiNode = TLRegisterNode (
address = AddressSet.misaligned(DMI_DMCONTROL << 2, 4) ++
AddressSet.misaligned(DMI_HARTINFO << 2, 4) ++
AddressSet.misaligned(DMI_HAWINDOWSEL << 2, 4) ++
AddressSet.misaligned(DMI_HAWINDOW << 2, 4),
device = device,
beatBytes = 4,
executable = false
)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
require (intnode.edges.in.size == 0, "Debug Module does not accept interrupts")
val nComponents = intnode.out.size
def getNComponents = () => nComponents
val supportHartArray = cfg.supportHartArray && (nComponents > 1) // no hart array if only one hart
val io = IO(new Bundle {
/** structure for top-level Debug Module signals which aren't the bus interfaces. */
val ctrl = (new DebugCtrlBundle(nComponents))
/** control signals for Inner, generated in Outer */
val innerCtrl = new DecoupledIO(new DebugInternalBundle(nComponents))
/** debug interruption from Inner to Outer
*
* contains 2 types of debug interruption causes:
* - halt group
* - halt-on-reset
*/
val hgDebugInt = Input(Vec(nComponents, Bool()))
/** hart reset request to core */
val hartResetReq = cfg.hasHartResets.option(Output(Vec(nComponents, Bool())))
/** authentication support */
val dmAuthenticated = cfg.hasAuthentication.option(Input(Bool()))
})
val omRegMap = withReset(reset.asAsyncReset) {
// FIXME: Instead of casting reset to ensure it is Async, assert/require reset.Type == AsyncReset (when this feature is available)
val dmAuthenticated = io.dmAuthenticated.map( dma =>
ResetSynchronizerShiftReg(in=dma, sync=3, name=Some("dmAuthenticated_sync"))).getOrElse(true.B)
//----DMCONTROL (The whole point of 'Outer' is to maintain this register on dmiClock (e.g. TCK) domain, so that it
// can be written even if 'Inner' is not being clocked or is in reset. This allows halting
// harts while the rest of the system is in reset. It doesn't really allow any other
// register accesses, which will keep returning 'busy' to the debugger interface.
val DMCONTROLReset = WireInit(0.U.asTypeOf(new DMCONTROLFields()))
val DMCONTROLNxt = WireInit(0.U.asTypeOf(new DMCONTROLFields()))
val DMCONTROLReg = RegNext(next=DMCONTROLNxt, init=0.U.asTypeOf(DMCONTROLNxt)).suggestName("DMCONTROLReg")
val hartsel_mask = if (nComponents > 1) ((1 << p(MaxHartIdBits)) - 1).U else 0.U
val DMCONTROLWrData = WireInit(0.U.asTypeOf(new DMCONTROLFields()))
val dmactiveWrEn = WireInit(false.B)
val ndmresetWrEn = WireInit(false.B)
val clrresethaltreqWrEn = WireInit(false.B)
val setresethaltreqWrEn = WireInit(false.B)
val hartselloWrEn = WireInit(false.B)
val haselWrEn = WireInit(false.B)
val ackhaveresetWrEn = WireInit(false.B)
val hartresetWrEn = WireInit(false.B)
val resumereqWrEn = WireInit(false.B)
val haltreqWrEn = WireInit(false.B)
val dmactive = DMCONTROLReg.dmactive
DMCONTROLNxt := DMCONTROLReg
when (~dmactive) {
DMCONTROLNxt := DMCONTROLReset
} .otherwise {
when (dmAuthenticated && ndmresetWrEn) { DMCONTROLNxt.ndmreset := DMCONTROLWrData.ndmreset }
when (dmAuthenticated && hartselloWrEn) { DMCONTROLNxt.hartsello := DMCONTROLWrData.hartsello & hartsel_mask}
when (dmAuthenticated && haselWrEn) { DMCONTROLNxt.hasel := DMCONTROLWrData.hasel }
when (dmAuthenticated && hartresetWrEn) { DMCONTROLNxt.hartreset := DMCONTROLWrData.hartreset }
when (dmAuthenticated && haltreqWrEn) { DMCONTROLNxt.haltreq := DMCONTROLWrData.haltreq }
}
// Put this last to override its own effects.
when (dmactiveWrEn) {
DMCONTROLNxt.dmactive := DMCONTROLWrData.dmactive
}
//----HARTINFO
// DATA registers are mapped to memory. The dataaddr field of HARTINFO has only
// 12 bits and assumes the DM base is 0. If not at 0, then HARTINFO reads as 0
// (implying nonexistence according to the Debug Spec).
val HARTINFORdData = WireInit(0.U.asTypeOf(new HARTINFOFields()))
if (cfg.atzero) when (dmAuthenticated) {
HARTINFORdData.dataaccess := true.B
HARTINFORdData.datasize := cfg.nAbstractDataWords.U
HARTINFORdData.dataaddr := DsbRegAddrs.DATA.U
HARTINFORdData.nscratch := cfg.nScratch.U
}
//--------------------------------------------------------------
// Hart array mask and window
// hamask is the hart array mask (1 bit per component), which doesn't include the hart selected by dmcontrol.hartsello
// HAWINDOWSEL selects a 32-bit slice of HAMASK to be visible for read/write in HAWINDOW
//--------------------------------------------------------------
val hamask = WireInit(VecInit(Seq.fill(nComponents) {false.B} ))
def haWindowSize = 32
// The following need to be declared even if supportHartArray is false due to reference
// at compile time by dmiNode.regmap
val HAWINDOWSELWrData = WireInit(0.U.asTypeOf(new HAWINDOWSELFields()))
val HAWINDOWSELWrEn = WireInit(false.B)
val HAWINDOWRdData = WireInit(0.U.asTypeOf(new HAWINDOWFields()))
val HAWINDOWWrData = WireInit(0.U.asTypeOf(new HAWINDOWFields()))
val HAWINDOWWrEn = WireInit(false.B)
/** whether the hart is selected */
def hartSelected(hart: Int): Bool = {
((io.innerCtrl.bits.hartsel === hart.U) ||
(if (supportHartArray) io.innerCtrl.bits.hasel && io.innerCtrl.bits.hamask(hart) else false.B))
}
val HAWINDOWSELNxt = WireInit(0.U.asTypeOf(new HAWINDOWSELFields()))
val HAWINDOWSELReg = RegNext(next=HAWINDOWSELNxt, init=0.U.asTypeOf(HAWINDOWSELNxt))
if (supportHartArray) {
val HAWINDOWSELReset = WireInit(0.U.asTypeOf(new HAWINDOWSELFields()))
HAWINDOWSELNxt := HAWINDOWSELReg
when (~dmactive || ~dmAuthenticated) {
HAWINDOWSELNxt := HAWINDOWSELReset
} .otherwise {
when (HAWINDOWSELWrEn) {
// Unneeded upper bits of HAWINDOWSEL are tied to 0. Entire register is 0 if all harts fit in one window
if (nComponents > haWindowSize) {
HAWINDOWSELNxt.hawindowsel := HAWINDOWSELWrData.hawindowsel & ((1 << (log2Up(nComponents) - 5)) - 1).U
} else {
HAWINDOWSELNxt.hawindowsel := 0.U
}
}
}
val numHAMASKSlices = ((nComponents - 1)/haWindowSize)+1
HAWINDOWRdData.maskdata := 0.U // default, overridden below
// for each slice, use a hamaskReg to store the selection info
for (ii <- 0 until numHAMASKSlices) {
val sliceMask = if (nComponents > ((ii*haWindowSize) + haWindowSize-1)) (BigInt(1) << haWindowSize) - 1 // All harts in this slice exist
else (BigInt(1)<<(nComponents - (ii*haWindowSize))) - 1 // Partial last slice
val HAMASKRst = WireInit(0.U.asTypeOf(new HAWINDOWFields()))
val HAMASKNxt = WireInit(0.U.asTypeOf(new HAWINDOWFields()))
val HAMASKReg = RegNext(next=HAMASKNxt, init=0.U.asTypeOf(HAMASKNxt))
when (ii.U === HAWINDOWSELReg.hawindowsel) {
HAWINDOWRdData.maskdata := HAMASKReg.asUInt & sliceMask.U
}
HAMASKNxt.maskdata := HAMASKReg.asUInt
when (~dmactive || ~dmAuthenticated) {
HAMASKNxt := HAMASKRst
}.otherwise {
when (HAWINDOWWrEn && (ii.U === HAWINDOWSELReg.hawindowsel)) {
HAMASKNxt.maskdata := HAWINDOWWrData.maskdata
}
}
// drive each slice of hamask with stored HAMASKReg or with new value being written
for (jj <- 0 until haWindowSize) {
if (((ii*haWindowSize) + jj) < nComponents) {
val tempWrData = HAWINDOWWrData.maskdata.asBools
val tempMaskReg = HAMASKReg.asUInt.asBools
when (HAWINDOWWrEn && (ii.U === HAWINDOWSELReg.hawindowsel)) {
hamask(ii*haWindowSize + jj) := tempWrData(jj)
}.otherwise {
hamask(ii*haWindowSize + jj) := tempMaskReg(jj)
}
}
}
}
}
//--------------------------------------------------------------
// Halt-on-reset
// hrmaskReg is current set of harts that should halt-on-reset
// Reset state (dmactive=0) is all zeroes
// Bits are set by writing 1 to DMCONTROL.setresethaltreq
// Bits are cleared by writing 1 to DMCONTROL.clrresethaltreq
// Spec says if both are 1, then clrresethaltreq is executed
// hrmask is the halt-on-reset mask which will be sent to inner
//--------------------------------------------------------------
val hrmask = Wire(Vec(nComponents, Bool()))
val hrmaskNxt = Wire(Vec(nComponents, Bool()))
val hrmaskReg = RegNext(next=hrmaskNxt, init=0.U.asTypeOf(hrmaskNxt)).suggestName("hrmaskReg")
hrmaskNxt := hrmaskReg
for (component <- 0 until nComponents) {
when (~dmactive || ~dmAuthenticated) {
hrmaskNxt(component) := false.B
}.elsewhen (clrresethaltreqWrEn && DMCONTROLWrData.clrresethaltreq && hartSelected(component)) {
hrmaskNxt(component) := false.B
}.elsewhen (setresethaltreqWrEn && DMCONTROLWrData.setresethaltreq && hartSelected(component)) {
hrmaskNxt(component) := true.B
}
}
hrmask := hrmaskNxt
val dmControlRegFields = RegFieldGroup("dmcontrol", Some("debug module control register"), Seq(
WNotifyVal(1, DMCONTROLReg.dmactive & io.ctrl.dmactiveAck, DMCONTROLWrData.dmactive, dmactiveWrEn,
RegFieldDesc("dmactive", "debug module active", reset=Some(0))),
WNotifyVal(1, DMCONTROLReg.ndmreset, DMCONTROLWrData.ndmreset, ndmresetWrEn,
RegFieldDesc("ndmreset", "debug module reset output", reset=Some(0))),
WNotifyVal(1, 0.U, DMCONTROLWrData.clrresethaltreq, clrresethaltreqWrEn,
RegFieldDesc("clrresethaltreq", "clear reset halt request", reset=Some(0), access=RegFieldAccessType.W)),
WNotifyVal(1, 0.U, DMCONTROLWrData.setresethaltreq, setresethaltreqWrEn,
RegFieldDesc("setresethaltreq", "set reset halt request", reset=Some(0), access=RegFieldAccessType.W)),
RegField(12),
if (nComponents > 1) WNotifyVal(p(MaxHartIdBits),
DMCONTROLReg.hartsello, DMCONTROLWrData.hartsello, hartselloWrEn,
RegFieldDesc("hartsello", "hart select low", reset=Some(0)))
else RegField(1),
if (nComponents > 1) RegField(10-p(MaxHartIdBits))
else RegField(9),
if (supportHartArray)
WNotifyVal(1, DMCONTROLReg.hasel, DMCONTROLWrData.hasel, haselWrEn,
RegFieldDesc("hasel", "hart array select", reset=Some(0)))
else RegField(1),
RegField(1),
WNotifyVal(1, 0.U, DMCONTROLWrData.ackhavereset, ackhaveresetWrEn,
RegFieldDesc("ackhavereset", "acknowledge reset", reset=Some(0), access=RegFieldAccessType.W)),
if (cfg.hasHartResets)
WNotifyVal(1, DMCONTROLReg.hartreset, DMCONTROLWrData.hartreset, hartresetWrEn,
RegFieldDesc("hartreset", "hart reset request", reset=Some(0)))
else RegField(1),
WNotifyVal(1, 0.U, DMCONTROLWrData.resumereq, resumereqWrEn,
RegFieldDesc("resumereq", "resume request", reset=Some(0), access=RegFieldAccessType.W)),
WNotifyVal(1, DMCONTROLReg.haltreq, DMCONTROLWrData.haltreq, haltreqWrEn, // Spec says W, but maintaining previous behavior
RegFieldDesc("haltreq", "halt request", reset=Some(0)))
))
val hartinfoRegFields = RegFieldGroup("dmi_hartinfo", Some("hart information"), Seq(
RegField.r(12, HARTINFORdData.dataaddr, RegFieldDesc("dataaddr", "data address", reset=Some(if (cfg.atzero) DsbRegAddrs.DATA else 0))),
RegField.r(4, HARTINFORdData.datasize, RegFieldDesc("datasize", "number of DATA registers", reset=Some(if (cfg.atzero) cfg.nAbstractDataWords else 0))),
RegField.r(1, HARTINFORdData.dataaccess, RegFieldDesc("dataaccess", "data access type", reset=Some(if (cfg.atzero) 1 else 0))),
RegField(3),
RegField.r(4, HARTINFORdData.nscratch, RegFieldDesc("nscratch", "number of scratch registers", reset=Some(if (cfg.atzero) cfg.nScratch else 0)))
))
//--------------------------------------------------------------
// DMI register decoder for Outer
//--------------------------------------------------------------
// regmap addresses are byte offsets from lowest address
def DMI_DMCONTROL_OFFSET = 0
def DMI_HARTINFO_OFFSET = ((DMI_HARTINFO - DMI_DMCONTROL) << 2)
def DMI_HAWINDOWSEL_OFFSET = ((DMI_HAWINDOWSEL - DMI_DMCONTROL) << 2)
def DMI_HAWINDOW_OFFSET = ((DMI_HAWINDOW - DMI_DMCONTROL) << 2)
val omRegMap = dmiNode.regmap(
DMI_DMCONTROL_OFFSET -> dmControlRegFields,
DMI_HARTINFO_OFFSET -> hartinfoRegFields,
DMI_HAWINDOWSEL_OFFSET -> (if (supportHartArray && (nComponents > 32)) Seq(
WNotifyVal(log2Up(nComponents)-5, HAWINDOWSELReg.hawindowsel, HAWINDOWSELWrData.hawindowsel, HAWINDOWSELWrEn,
RegFieldDesc("hawindowsel", "hart array window select", reset=Some(0)))) else Nil),
DMI_HAWINDOW_OFFSET -> (if (supportHartArray) Seq(
WNotifyVal(if (nComponents > 31) 32 else nComponents, HAWINDOWRdData.maskdata, HAWINDOWWrData.maskdata, HAWINDOWWrEn,
RegFieldDesc("hawindow", "hart array window", reset=Some(0), volatile=(nComponents > 32)))) else Nil)
)
//--------------------------------------------------------------
// Interrupt Registers
//--------------------------------------------------------------
val debugIntNxt = WireInit(VecInit(Seq.fill(nComponents) {false.B} ))
val debugIntRegs = RegNext(next=debugIntNxt, init=0.U.asTypeOf(debugIntNxt)).suggestName("debugIntRegs")
debugIntNxt := debugIntRegs
val (intnode_out, _) = intnode.out.unzip
for (component <- 0 until nComponents) {
intnode_out(component)(0) := debugIntRegs(component) | io.hgDebugInt(component)
}
// send a debug interrupt to the core when DMCONTROL.haltreq is set for it
for (component <- 0 until nComponents) {
when (~dmactive || ~dmAuthenticated) {
debugIntNxt(component) := false.B
}. otherwise {
when (haltreqWrEn && ((DMCONTROLWrData.hartsello === component.U)
|| (if (supportHartArray) DMCONTROLWrData.hasel && hamask(component) else false.B))) {
debugIntNxt(component) := DMCONTROLWrData.haltreq
}
}
}
// Halt request registers are set & cleared by writes to DMCONTROL.haltreq
// resumereq also causes the core to execute a 'dret',
// so resumereq is passed through to Inner.
// hartsel/hasel/hamask must also be used by the DebugModule state machine,
// so it is passed to Inner.
// These registers ensure that requests to dmInner are not lost if inner clock isn't running or requests occur too close together.
// If the innerCtrl async queue is not ready, the notification will be posted and held until ready is received.
// Additional notifications that occur while one is already waiting update the pending data so that the last value written is sent.
// Volatile events resumereq and ackhavereset are registered when they occur and remain pending until ready is received.
val innerCtrlValid = Wire(Bool())
val innerCtrlValidReg = RegInit(false.B).suggestName("innerCtrlValidReg")
val innerCtrlResumeReqReg = RegInit(false.B).suggestName("innerCtrlResumeReqReg")
val innerCtrlAckHaveResetReg = RegInit(false.B).suggestName("innerCtrlAckHaveResetReg")
innerCtrlValid := hartselloWrEn | resumereqWrEn | ackhaveresetWrEn | setresethaltreqWrEn | clrresethaltreqWrEn | haselWrEn |
(HAWINDOWWrEn & supportHartArray.B)
innerCtrlValidReg := io.innerCtrl.valid & ~io.innerCtrl.ready // Hold innerctrl request until the async queue accepts it
innerCtrlResumeReqReg := io.innerCtrl.bits.resumereq & ~io.innerCtrl.ready // Hold resumereq until accepted
innerCtrlAckHaveResetReg := io.innerCtrl.bits.ackhavereset & ~io.innerCtrl.ready // Hold ackhavereset until accepted
io.innerCtrl.valid := innerCtrlValid | innerCtrlValidReg
io.innerCtrl.bits.hartsel := Mux(hartselloWrEn, DMCONTROLWrData.hartsello, DMCONTROLReg.hartsello)
io.innerCtrl.bits.resumereq := (resumereqWrEn & DMCONTROLWrData.resumereq) | innerCtrlResumeReqReg
io.innerCtrl.bits.ackhavereset := (ackhaveresetWrEn & DMCONTROLWrData.ackhavereset) | innerCtrlAckHaveResetReg
io.innerCtrl.bits.hrmask := hrmask
if (supportHartArray) {
io.innerCtrl.bits.hasel := Mux(haselWrEn, DMCONTROLWrData.hasel, DMCONTROLReg.hasel)
io.innerCtrl.bits.hamask := hamask
} else {
io.innerCtrl.bits.hasel := DontCare
io.innerCtrl.bits.hamask := DontCare
}
io.ctrl.ndreset := DMCONTROLReg.ndmreset
io.ctrl.dmactive := DMCONTROLReg.dmactive
// hart reset mechanism implementation
if (cfg.hasHartResets) {
val hartResetNxt = Wire(Vec(nComponents, Bool()))
val hartResetReg = RegNext(next=hartResetNxt, init=0.U.asTypeOf(hartResetNxt))
for (component <- 0 until nComponents) {
hartResetNxt(component) := DMCONTROLReg.hartreset & hartSelected(component)
io.hartResetReq.get(component) := hartResetReg(component)
}
}
omRegMap // FIXME: Remove this when withReset is removed
}}
}
// wrap an Outer with a DMIToTL, driven by the dmi clock & reset
class TLDebugModuleOuterAsync(device: Device)(implicit p: Parameters) extends LazyModule {
val cfg = p(DebugModuleKey).get
val dmiXbar = LazyModule (new TLXbar(nameSuffix = Some("dmixbar")))
val dmi2tlOpt = (!p(ExportDebug).apb).option({
val dmi2tl = LazyModule(new DMIToTL())
dmiXbar.node := dmi2tl.node
dmi2tl
})
val apbNodeOpt = p(ExportDebug).apb.option({
val apb2tl = LazyModule(new APBToTL())
val apb2tlBuffer = LazyModule(new TLBuffer(BufferParams.pipe))
val dmTopAddr = (1 << cfg.nDMIAddrSize) << 2
val tlErrorParams = DevNullParams(AddressSet.misaligned(dmTopAddr, APBDebugConsts.apbDebugRegBase-dmTopAddr), maxAtomic=0, maxTransfer=4)
val tlError = LazyModule(new TLError(tlErrorParams, buffer=false))
val apbXbar = LazyModule(new APBFanout())
val apbRegs = LazyModule(new APBDebugRegisters())
apbRegs.node := apbXbar.node
apb2tl.node := apbXbar.node
apb2tlBuffer.node := apb2tl.node
dmiXbar.node := apb2tlBuffer.node
tlError.node := dmiXbar.node
apbXbar.node
})
val dmOuter = LazyModule( new TLDebugModuleOuter(device))
val intnode = IntSyncIdentityNode()
intnode :*= IntSyncCrossingSource(alreadyRegistered = true) :*= dmOuter.intnode
val dmiBypass = LazyModule(new TLBusBypass(beatBytes=4, bufferError=false, maxAtomic=0, maxTransfer=4))
val dmiInnerNode = TLAsyncCrossingSource() := dmiBypass.node := dmiXbar.node
dmOuter.dmiNode := dmiXbar.node
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val nComponents = dmOuter.intnode.edges.out.size
val io = IO(new Bundle {
val dmi_clock = Input(Clock())
val dmi_reset = Input(Reset())
/** Debug Module Interface between DM and DTM
*
* The DTM provides access to one or more Debug Modules (DMs) using DMI
*/
val dmi = (!p(ExportDebug).apb).option(Flipped(new DMIIO()(p)))
// Optional APB Interface is fully diplomatic so is not listed here.
val ctrl = new DebugCtrlBundle(nComponents)
/** control signals for Inner, generated in Outer */
val innerCtrl = new AsyncBundle(new DebugInternalBundle(nComponents), AsyncQueueParams.singleton(safe=cfg.crossingHasSafeReset))
/** debug interruption generated in Inner */
val hgDebugInt = Input(Vec(nComponents, Bool()))
/** hart reset request to core */
val hartResetReq = p(DebugModuleKey).get.hasHartResets.option(Output(Vec(nComponents, Bool())))
/** Authentication signal from core */
val dmAuthenticated = p(DebugModuleKey).get.hasAuthentication.option(Input(Bool()))
})
val rf_reset = IO(Input(Reset())) // RF transform
childClock := io.dmi_clock
childReset := io.dmi_reset
override def provideImplicitClockToLazyChildren = true
withClockAndReset(childClock, childReset) {
dmi2tlOpt.foreach { _.module.io.dmi <> io.dmi.get }
val dmactiveAck = AsyncResetSynchronizerShiftReg(in=io.ctrl.dmactiveAck, sync=3, name=Some("dmactiveAckSync"))
dmiBypass.module.io.bypass := ~io.ctrl.dmactive | ~dmactiveAck
io.ctrl <> dmOuter.module.io.ctrl
dmOuter.module.io.ctrl.dmactiveAck := dmactiveAck // send synced version down to dmOuter
io.innerCtrl <> ToAsyncBundle(dmOuter.module.io.innerCtrl, AsyncQueueParams.singleton(safe=cfg.crossingHasSafeReset))
dmOuter.module.io.hgDebugInt := io.hgDebugInt
io.hartResetReq.foreach { x => dmOuter.module.io.hartResetReq.foreach {y => x := y}}
io.dmAuthenticated.foreach { x => dmOuter.module.io.dmAuthenticated.foreach { y => y := x}}
}
}
}
class TLDebugModuleInner(device: Device, getNComponents: () => Int, beatBytes: Int)(implicit p: Parameters) extends LazyModule
{
// For Shorter Register Names
import DMI_RegAddrs._
val cfg = p(DebugModuleKey).get
def getCfg = () => cfg
val dmTopAddr = (1 << cfg.nDMIAddrSize) << 2
/** dmiNode address set */
val dmiNode = TLRegisterNode(
// Address is range 0 to 0x1FF except DMCONTROL, HARTINFO, HAWINDOWSEL, HAWINDOW which are handled by Outer
address = AddressSet.misaligned(0, DMI_DMCONTROL << 2) ++
AddressSet.misaligned((DMI_DMCONTROL + 1) << 2, ((DMI_HARTINFO << 2) - ((DMI_DMCONTROL + 1) << 2))) ++
AddressSet.misaligned((DMI_HARTINFO + 1) << 2, ((DMI_HAWINDOWSEL << 2) - ((DMI_HARTINFO + 1) << 2))) ++
AddressSet.misaligned((DMI_HAWINDOW + 1) << 2, (dmTopAddr - ((DMI_HAWINDOW + 1) << 2))),
device = device,
beatBytes = 4,
executable = false
)
val tlNode = TLRegisterNode(
address=Seq(cfg.address),
device=device,
beatBytes=beatBytes,
executable=true
)
val sb2tlOpt = cfg.hasBusMaster.option(LazyModule(new SBToTL()))
// If we want to support custom registers read through Abstract Commands,
// provide a place to bring them into the debug module. What this connects
// to is up to the implementation.
val customNode = new DebugCustomSink()
lazy val module = new Impl
class Impl extends LazyModuleImp(this){
val nComponents = getNComponents()
Annotated.params(this, cfg)
val supportHartArray = cfg.supportHartArray & (nComponents > 1)
val nExtTriggers = cfg.nExtTriggers
val nHaltGroups = if ((nComponents > 1) | (nExtTriggers > 0)) cfg.nHaltGroups
else 0 // no halt groups possible if single hart with no external triggers
val hartSelFuncs = if (getNComponents() > 1) p(DebugModuleHartSelKey) else DebugModuleHartSelFuncs(
hartIdToHartSel = (x) => 0.U,
hartSelToHartId = (x) => x
)
val io = IO(new Bundle {
/** dm reset signal passed in from Outer */
val dmactive = Input(Bool())
/** control signals for Inner
*
* it's generated by Outer and comes in
*/
val innerCtrl = Flipped(new DecoupledIO(new DebugInternalBundle(nComponents)))
/** debug unavail signal passed in from Outer */
val debugUnavail = Input(Vec(nComponents, Bool()))
/** debug interruption from Inner to Outer
*
* contains 2 types of debug interruption causes:
* - halt group
* - halt-on-reset
*/
val hgDebugInt = Output(Vec(nComponents, Bool()))
/** interface for trigger */
val extTrigger = (nExtTriggers > 0).option(new DebugExtTriggerIO())
/** vector to indicate which hart is in reset
*
* dm receives it from core and sends it to Inner
*/
val hartIsInReset = Input(Vec(nComponents, Bool()))
val tl_clock = Input(Clock())
val tl_reset = Input(Reset())
/** Debug Authentication signals from core */
val auth = cfg.hasAuthentication.option(new DebugAuthenticationIO())
})
sb2tlOpt.map { sb =>
sb.module.clock := io.tl_clock
sb.module.reset := io.tl_reset
sb.module.rf_reset := io.tl_reset
}
//--------------------------------------------------------------
// Import constants for shorter variable names
//--------------------------------------------------------------
import DMI_RegAddrs._
import DsbRegAddrs._
import DsbBusConsts._
//--------------------------------------------------------------
// Sanity Check Configuration For this implementation.
//--------------------------------------------------------------
require (cfg.supportQuickAccess == false, "No Quick Access support yet")
require ((nHaltGroups > 0) || (nExtTriggers == 0), "External triggers require at least 1 halt group")
//--------------------------------------------------------------
// Register & Wire Declarations (which need to be pre-declared)
//--------------------------------------------------------------
// run control regs: tracking all the harts
// implements: see implementation-specific bits part
/** all harts halted status */
val haltedBitRegs = Reg(UInt(nComponents.W))
/** all harts resume request status */
val resumeReqRegs = Reg(UInt(nComponents.W))
/** all harts have reset status */
val haveResetBitRegs = Reg(UInt(nComponents.W))
// default is 1; after resume, resumeAcks get 0
/** all harts resume ack status */
val resumeAcks = Wire(UInt(nComponents.W))
// --- regmapper outputs
// hart state Id and En
// in Hart Bus Access ROM
val hartHaltedWrEn = Wire(Bool())
val hartHaltedId = Wire(UInt(sbIdWidth.W))
val hartGoingWrEn = Wire(Bool())
val hartGoingId = Wire(UInt(sbIdWidth.W))
val hartResumingWrEn = Wire(Bool())
val hartResumingId = Wire(UInt(sbIdWidth.W))
val hartExceptionWrEn = Wire(Bool())
val hartExceptionId = Wire(UInt(sbIdWidth.W))
// progbuf and abstract data: byte-addressable control logic
// AccessLegal is set only when state = waiting
// RdEn and WrEnMaybe: control signals driven by the DMI bus
val dmiProgramBufferRdEn = WireInit(VecInit(Seq.fill(cfg.nProgramBufferWords * 4) {false.B} ))
val dmiProgramBufferAccessLegal = WireInit(false.B)
val dmiProgramBufferWrEnMaybe = WireInit(VecInit(Seq.fill(cfg.nProgramBufferWords * 4) {false.B} ))
val dmiAbstractDataRdEn = WireInit(VecInit(Seq.fill(cfg.nAbstractDataWords * 4) {false.B} ))
val dmiAbstractDataAccessLegal = WireInit(false.B)
val dmiAbstractDataWrEnMaybe = WireInit(VecInit(Seq.fill(cfg.nAbstractDataWords * 4) {false.B} ))
//--------------------------------------------------------------
// Registers coming from 'CONTROL' in Outer
//--------------------------------------------------------------
val dmAuthenticated = io.auth.map(a => a.dmAuthenticated).getOrElse(true.B)
val selectedHartReg = Reg(UInt(p(MaxHartIdBits).W))
// hamaskFull is a vector of all selected harts including hartsel, whether or not supportHartArray is true
val hamaskFull = WireInit(VecInit(Seq.fill(nComponents) {false.B} ))
if (nComponents > 1) {
when (~io.dmactive) {
selectedHartReg := 0.U
}.elsewhen (io.innerCtrl.fire){
selectedHartReg := io.innerCtrl.bits.hartsel
}
}
if (supportHartArray) {
val hamaskZero = WireInit(VecInit(Seq.fill(nComponents) {false.B} ))
val hamaskReg = Reg(Vec(nComponents, Bool()))
when (~io.dmactive || ~dmAuthenticated) {
hamaskReg := hamaskZero
}.elsewhen (io.innerCtrl.fire){
hamaskReg := Mux(io.innerCtrl.bits.hasel, io.innerCtrl.bits.hamask, hamaskZero)
}
hamaskFull := hamaskReg
}
// Outer.hamask doesn't consider the hart selected by dmcontrol.hartsello,
// so append it here
when (selectedHartReg < nComponents.U) {
hamaskFull(if (nComponents == 1) 0.U(0.W) else selectedHartReg) := true.B
}
io.innerCtrl.ready := true.B
// Construct a Vec from io.innerCtrl fields indicating whether each hart is being selected in this write
// A hart may be selected by hartsel field or by hart array
val hamaskWrSel = WireInit(VecInit(Seq.fill(nComponents) {false.B} ))
for (component <- 0 until nComponents ) {
hamaskWrSel(component) := ((io.innerCtrl.bits.hartsel === component.U) ||
(if (supportHartArray) io.innerCtrl.bits.hasel && io.innerCtrl.bits.hamask(component) else false.B))
}
//-------------------------------------
// Halt-on-reset logic
// hrmask is set in dmOuter and passed in
// Debug interrupt is generated when a reset occurs whose corresponding hrmask bit is set
// Debug interrupt is maintained until the hart enters halted state
//-------------------------------------
val hrReset = WireInit(VecInit(Seq.fill(nComponents) { false.B } ))
val hrDebugInt = Wire(Vec(nComponents, Bool()))
val hrmaskReg = RegInit(hrReset)
val hartIsInResetSync = Wire(Vec(nComponents, Bool()))
for (component <- 0 until nComponents) {
hartIsInResetSync(component) := AsyncResetSynchronizerShiftReg(io.hartIsInReset(component), 3, Some(s"debug_hartReset_$component"))
}
when (~io.dmactive || ~dmAuthenticated) {
hrmaskReg := hrReset
}.elsewhen (io.innerCtrl.fire){
hrmaskReg := io.innerCtrl.bits.hrmask
}
withReset(reset.asAsyncReset) { // ensure interrupt requests are negated at first clock edge
val hrDebugIntReg = RegInit(VecInit(Seq.fill(nComponents) { false.B } ))
when (~io.dmactive || ~dmAuthenticated) {
hrDebugIntReg := hrReset
}.otherwise {
hrDebugIntReg := hrmaskReg &
(hartIsInResetSync | // set debugInt during reset
(hrDebugIntReg & ~(haltedBitRegs.asBools))) // maintain until core halts
}
hrDebugInt := hrDebugIntReg
}
//--------------------------------------------------------------
// DMI Registers
//--------------------------------------------------------------
//----DMSTATUS
val DMSTATUSRdData = WireInit(0.U.asTypeOf(new DMSTATUSFields()))
DMSTATUSRdData.authenticated := dmAuthenticated
DMSTATUSRdData.version := 2.U // Version 0.13
io.auth.map(a => DMSTATUSRdData.authbusy := a.dmAuthBusy)
val resumereq = io.innerCtrl.fire && io.innerCtrl.bits.resumereq
when (dmAuthenticated) {
DMSTATUSRdData.hasresethaltreq := true.B
DMSTATUSRdData.anynonexistent := (selectedHartReg >= nComponents.U) // only hartsel can be nonexistent
// all harts nonexistent if hartsel is out of range and there are no harts selected in the hart array
DMSTATUSRdData.allnonexistent := (selectedHartReg >= nComponents.U) & (~hamaskFull.reduce(_ | _))
when (~DMSTATUSRdData.allnonexistent) { // if no existent harts selected, all other status is false
DMSTATUSRdData.anyunavail := (io.debugUnavail & hamaskFull).reduce(_ | _)
DMSTATUSRdData.anyhalted := ((~io.debugUnavail & (haltedBitRegs.asBools)) & hamaskFull).reduce(_ | _)
DMSTATUSRdData.anyrunning := ((~io.debugUnavail & ~(haltedBitRegs.asBools)) & hamaskFull).reduce(_ | _)
DMSTATUSRdData.anyhavereset := (haveResetBitRegs.asBools & hamaskFull).reduce(_ | _)
DMSTATUSRdData.anyresumeack := (resumeAcks.asBools & hamaskFull).reduce(_ | _)
when (~DMSTATUSRdData.anynonexistent) { // if one hart is nonexistent, no 'all' status is set
DMSTATUSRdData.allunavail := (io.debugUnavail | ~hamaskFull).reduce(_ & _)
DMSTATUSRdData.allhalted := ((~io.debugUnavail & (haltedBitRegs.asBools)) | ~hamaskFull).reduce(_ & _)
DMSTATUSRdData.allrunning := ((~io.debugUnavail & ~(haltedBitRegs.asBools)) | ~hamaskFull).reduce(_ & _)
DMSTATUSRdData.allhavereset := (haveResetBitRegs.asBools | ~hamaskFull).reduce(_ & _)
DMSTATUSRdData.allresumeack := (resumeAcks.asBools | ~hamaskFull).reduce(_ & _)
}
}
//TODO
DMSTATUSRdData.confstrptrvalid := false.B
DMSTATUSRdData.impebreak := (cfg.hasImplicitEbreak).B
}
when(~io.dmactive || ~dmAuthenticated) {
haveResetBitRegs := 0.U
}.otherwise {
when (io.innerCtrl.fire && io.innerCtrl.bits.ackhavereset) {
haveResetBitRegs := (haveResetBitRegs & (~(hamaskWrSel.asUInt))) | hartIsInResetSync.asUInt
}.otherwise {
haveResetBitRegs := haveResetBitRegs | hartIsInResetSync.asUInt
}
}
//----DMCS2 (Halt Groups)
val DMCS2RdData = WireInit(0.U.asTypeOf(new DMCS2Fields()))
val DMCS2WrData = WireInit(0.U.asTypeOf(new DMCS2Fields()))
val hgselectWrEn = WireInit(false.B)
val hgwriteWrEn = WireInit(false.B)
val haltgroupWrEn = WireInit(false.B)
val exttriggerWrEn = WireInit(false.B)
val hgDebugInt = WireInit(VecInit(Seq.fill(nComponents) {false.B} ))
if (nHaltGroups > 0) withReset (reset.asAsyncReset) { // async reset ensures triggers don't falsely fire during startup
val hgBits = log2Up(nHaltGroups)
// hgParticipate: Each entry indicates which hg that entity belongs to (1 to nHaltGroups). 0 means no hg assigned.
val hgParticipateHart = RegInit(VecInit(Seq.fill(nComponents)(0.U(hgBits.W))))
val hgParticipateTrig = if (nExtTriggers > 0) RegInit(VecInit(Seq.fill(nExtTriggers)(0.U(hgBits.W)))) else Nil
// assign group index to currently selected harts
for (component <- 0 until nComponents) {
when (~io.dmactive || ~dmAuthenticated) {
hgParticipateHart(component) := 0.U
}.otherwise {
when (haltgroupWrEn & DMCS2WrData.hgwrite & ~DMCS2WrData.hgselect &
hamaskFull(component) & (DMCS2WrData.haltgroup <= nHaltGroups.U)) {
hgParticipateHart(component) := DMCS2WrData.haltgroup
}
}
}
DMCS2RdData.haltgroup := hgParticipateHart(if (nComponents == 1) 0.U(0.W) else selectedHartReg)
if (nExtTriggers > 0) {
val hgSelect = Reg(Bool())
when (~io.dmactive || ~dmAuthenticated) {
hgSelect := false.B
}.otherwise {
when (hgselectWrEn) {
hgSelect := DMCS2WrData.hgselect
}
}
// assign group index to trigger
for (trigger <- 0 until nExtTriggers) {
when (~io.dmactive || ~dmAuthenticated) {
hgParticipateTrig(trigger) := 0.U
}.otherwise {
when (haltgroupWrEn & DMCS2WrData.hgwrite & DMCS2WrData.hgselect &
(DMCS2WrData.exttrigger === trigger.U) & (DMCS2WrData.haltgroup <= nHaltGroups.U)) {
hgParticipateTrig(trigger) := DMCS2WrData.haltgroup
}
}
}
DMCS2RdData.hgselect := hgSelect
when (hgSelect) {
DMCS2RdData.haltgroup := hgParticipateTrig(0)
}
// If there is only 1 ext trigger, then the exttrigger field is fixed at 0
// Otherwise, instantiate a register with only the number of bits required
if (nExtTriggers > 1) {
val trigBits = log2Up(nExtTriggers-1)
val hgExtTrigger = Reg(UInt(trigBits.W))
when (~io.dmactive || ~dmAuthenticated) {
hgExtTrigger := 0.U
}.otherwise {
when (exttriggerWrEn & (DMCS2WrData.exttrigger < nExtTriggers.U)) {
hgExtTrigger := DMCS2WrData.exttrigger
}
}
DMCS2RdData.exttrigger := hgExtTrigger
when (hgSelect) {
DMCS2RdData.haltgroup := hgParticipateTrig(hgExtTrigger)
}
}
}
// Halt group state machine
// IDLE: Go to FIRED when any hart in this hg writes to HALTED while its HaltedBitRegs=0
// or when any trigin assigned to this hg occurs
// FIRED: Back to IDLE when all harts in this hg have set their haltedBitRegs
// and all trig out in this hg have been acknowledged
val hgFired = RegInit (VecInit(Seq.fill(nHaltGroups+1) {false.B} ))
val hgHartFiring = WireInit(VecInit(Seq.fill(nHaltGroups+1) {false.B} )) // which hg's are firing due to hart halting
val hgTrigFiring = WireInit(VecInit(Seq.fill(nHaltGroups+1) {false.B} )) // which hg's are firing due to trig in
val hgHartsAllHalted = WireInit(VecInit(Seq.fill(nHaltGroups+1) {false.B} )) // in which hg's have all harts halted
val hgTrigsAllAcked = WireInit(VecInit(Seq.fill(nHaltGroups+1) { true.B} )) // in which hg's have all trigouts been acked
io.extTrigger.foreach {extTrigger =>
val extTriggerInReq = Wire(Vec(nExtTriggers, Bool()))
val extTriggerOutAck = Wire(Vec(nExtTriggers, Bool()))
extTriggerInReq := extTrigger.in.req.asBools
extTriggerOutAck := extTrigger.out.ack.asBools
val trigInReq = ResetSynchronizerShiftReg(in=extTriggerInReq, sync=3, name=Some("dm_extTriggerInReqSync"))
val trigOutAck = ResetSynchronizerShiftReg(in=extTriggerOutAck, sync=3, name=Some("dm_extTriggerOutAckSync"))
for (hg <- 1 to nHaltGroups) {
hgTrigFiring(hg) := (trigInReq & ~RegNext(trigInReq) & hgParticipateTrig.map(_ === hg.U)).reduce(_ | _)
hgTrigsAllAcked(hg) := (trigOutAck | hgParticipateTrig.map(_ =/= hg.U)).reduce(_ & _)
}
extTrigger.in.ack := trigInReq.asUInt
}
for (hg <- 1 to nHaltGroups) {
hgHartFiring(hg) := hartHaltedWrEn & ~haltedBitRegs(hartHaltedId) & (hgParticipateHart(hartSelFuncs.hartIdToHartSel(hartHaltedId)) === hg.U)
hgHartsAllHalted(hg) := (haltedBitRegs.asBools | hgParticipateHart.map(_ =/= hg.U)).reduce(_ & _)
when (~io.dmactive || ~dmAuthenticated) {
hgFired(hg) := false.B
}.elsewhen (~hgFired(hg) & (hgHartFiring(hg) | hgTrigFiring(hg))) {
hgFired(hg) := true.B
}.elsewhen ( hgFired(hg) & hgHartsAllHalted(hg) & hgTrigsAllAcked(hg)) {
hgFired(hg) := false.B
}
}
// For each hg that has fired, assert debug interrupt to each hart in that hg
for (component <- 0 until nComponents) {
hgDebugInt(component) := hgFired(hgParticipateHart(component))
}
// For each hg that has fired, assert trigger out for all external triggers in that hg
io.extTrigger.foreach {extTrigger =>
val extTriggerOutReq = RegInit(VecInit(Seq.fill(cfg.nExtTriggers) {false.B} ))
for (trig <- 0 until nExtTriggers) {
extTriggerOutReq(trig) := hgFired(hgParticipateTrig(trig))
}
extTrigger.out.req := extTriggerOutReq.asUInt
}
}
io.hgDebugInt := hgDebugInt | hrDebugInt
//----HALTSUM*
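// haltedStatus packs haltedBitRegs into 32-bit words: HALTSUM0 returns the word
// selected by hartsel (selectedHartReg >> 5), while HALTSUM1 ORs each word down
// to a single summary bit.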
val numHaltedStatus = ((nComponents - 1) / 32) + 1
val haltedStatus = Wire(Vec(numHaltedStatus, Bits(32.W)))
for (ii <- 0 until numHaltedStatus) {
when (dmAuthenticated) {
haltedStatus(ii) := haltedBitRegs >> (ii*32)
}.otherwise {
haltedStatus(ii) := 0.U
}
}
val haltedSummary = Cat(haltedStatus.map(_.orR).reverse)
val HALTSUM1RdData = haltedSummary.asTypeOf(new HALTSUM1Fields())
val selectedHaltedStatus = Mux((selectedHartReg >> 5) > numHaltedStatus.U, 0.U, haltedStatus(selectedHartReg >> 5))
val HALTSUM0RdData = selectedHaltedStatus.asTypeOf(new HALTSUM0Fields())
// Since we only support 1024 harts, we don't implement HALTSUM2 or HALTSUM3
//----ABSTRACTCS
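// ABSTRACTCS reports datacount/progbufsize and the busy/cmderr status of the
// abstract command engine. cmderr is W1C from the debugger and is set by the
// error signals driven from the state machine below.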
val ABSTRACTCSReset = WireInit(0.U.asTypeOf(new ABSTRACTCSFields()))
ABSTRACTCSReset.datacount := cfg.nAbstractDataWords.U
ABSTRACTCSReset.progbufsize := cfg.nProgramBufferWords.U
val ABSTRACTCSReg = Reg(new ABSTRACTCSFields())
val ABSTRACTCSWrData = WireInit(0.U.asTypeOf(new ABSTRACTCSFields()))
val ABSTRACTCSRdData = WireInit(ABSTRACTCSReg)
val ABSTRACTCSRdEn = WireInit(false.B)
val ABSTRACTCSWrEnMaybe = WireInit(false.B)
val ABSTRACTCSWrEnLegal = WireInit(false.B)
val ABSTRACTCSWrEn = ABSTRACTCSWrEnMaybe && ABSTRACTCSWrEnLegal
// multiple error types
// their handling is implemented in the abstract command state machine below
val errorBusy = WireInit(false.B)
val errorException = WireInit(false.B)
val errorUnsupported = WireInit(false.B)
val errorHaltResume = WireInit(false.B)
when (~io.dmactive || ~dmAuthenticated) {
ABSTRACTCSReg := ABSTRACTCSReset
}.otherwise {
when (errorBusy){
ABSTRACTCSReg.cmderr := DebugAbstractCommandError.ErrBusy.id.U
}.elsewhen (errorException) {
ABSTRACTCSReg.cmderr := DebugAbstractCommandError.ErrException.id.U
}.elsewhen (errorUnsupported) {
ABSTRACTCSReg.cmderr := DebugAbstractCommandError.ErrNotSupported.id.U
}.elsewhen (errorHaltResume) {
ABSTRACTCSReg.cmderr := DebugAbstractCommandError.ErrHaltResume.id.U
}.otherwise {
//W1C
when (ABSTRACTCSWrEn){
ABSTRACTCSReg.cmderr := ABSTRACTCSReg.cmderr & ~(ABSTRACTCSWrData.cmderr);
}
}
}
// For busy, see below state machine.
val abstractCommandBusy = WireInit(true.B)
ABSTRACTCSRdData.busy := abstractCommandBusy
when (~dmAuthenticated) { // read value must be 0 when not authenticated
ABSTRACTCSRdData.datacount := 0.U
ABSTRACTCSRdData.progbufsize := 0.U
}
//---- ABSTRACTAUTO
// ABSTRACTAUTO is a mask indicating whether data0..N/progbuf0..N have autoexecution permission
// this part produces 3 wires: autoexecData, autoexecProg, autoexec
// the first two indicate which registers have autoexec enabled
// autoexec is a control signal, meaning at least one accessed register has autoexec enabled
// when autoexec is set, instructions are regenerated from the COMMAND register
val ABSTRACTAUTOReset = WireInit(0.U.asTypeOf(new ABSTRACTAUTOFields()))
val ABSTRACTAUTOReg = Reg(new ABSTRACTAUTOFields())
val ABSTRACTAUTOWrData = WireInit(0.U.asTypeOf(new ABSTRACTAUTOFields()))
val ABSTRACTAUTORdData = WireInit(ABSTRACTAUTOReg)
val ABSTRACTAUTORdEn = WireInit(false.B)
val autoexecdataWrEnMaybe = WireInit(false.B)
val autoexecprogbufWrEnMaybe = WireInit(false.B)
val ABSTRACTAUTOWrEnLegal = WireInit(false.B)
when (~io.dmactive || ~dmAuthenticated) {
ABSTRACTAUTOReg := ABSTRACTAUTOReset
}.otherwise {
when (autoexecprogbufWrEnMaybe && ABSTRACTAUTOWrEnLegal) {
ABSTRACTAUTOReg.autoexecprogbuf := ABSTRACTAUTOWrData.autoexecprogbuf & ( (1 << cfg.nProgramBufferWords) - 1).U
}
when (autoexecdataWrEnMaybe && ABSTRACTAUTOWrEnLegal) {
ABSTRACTAUTOReg.autoexecdata := ABSTRACTAUTOWrData.autoexecdata & ( (1 << cfg.nAbstractDataWords) - 1).U
}
}
// Abstract Data access vector (byte-addressable)
val dmiAbstractDataAccessVec = WireInit(VecInit(Seq.fill(cfg.nAbstractDataWords * 4) {false.B} ))
dmiAbstractDataAccessVec := (dmiAbstractDataWrEnMaybe zip dmiAbstractDataRdEn).map{ case (r,w) => r | w}
// Program Buffer access vector (byte-addressable)
val dmiProgramBufferAccessVec = WireInit(VecInit(Seq.fill(cfg.nProgramBufferWords * 4) {false.B} ))
dmiProgramBufferAccessVec := (dmiProgramBufferWrEnMaybe zip dmiProgramBufferRdEn).map{ case (r,w) => r | w}
// at least one word access
val dmiAbstractDataAccess = dmiAbstractDataAccessVec.reduce(_ || _ )
val dmiProgramBufferAccess = dmiProgramBufferAccessVec.reduce(_ || _)
// This will take the shorter of the lists, which is what we want.
val autoexecData = WireInit(VecInit(Seq.fill(cfg.nAbstractDataWords) {false.B} ))
val autoexecProg = WireInit(VecInit(Seq.fill(cfg.nProgramBufferWords) {false.B} ))
(autoexecData zip ABSTRACTAUTOReg.autoexecdata.asBools).zipWithIndex.foreach {case (t, i) => t._1 := dmiAbstractDataAccessVec(i * 4) && t._2 }
(autoexecProg zip ABSTRACTAUTOReg.autoexecprogbuf.asBools).zipWithIndex.foreach {case (t, i) => t._1 := dmiProgramBufferAccessVec(i * 4) && t._2}
val autoexec = autoexecData.reduce(_ || _) || autoexecProg.reduce(_ || _)
//---- COMMAND
val COMMANDReset = WireInit(0.U.asTypeOf(new COMMANDFields()))
val COMMANDReg = Reg(new COMMANDFields())
val COMMANDWrDataVal = WireInit(0.U(32.W))
val COMMANDWrData = WireInit(COMMANDWrDataVal.asTypeOf(new COMMANDFields()))
val COMMANDWrEnMaybe = WireInit(false.B)
val COMMANDWrEnLegal = WireInit(false.B)
val COMMANDRdEn = WireInit(false.B)
val COMMANDWrEn = COMMANDWrEnMaybe && COMMANDWrEnLegal
val COMMANDRdData = COMMANDReg
when (~io.dmactive || ~dmAuthenticated) {
COMMANDReg := COMMANDReset
}.otherwise {
when (COMMANDWrEn) {
COMMANDReg := COMMANDWrData
}
}
// --- Abstract Data
// These are byte-addressable, s.t. the Processor can use
// byte-addressable instructions to store to them.
val abstractDataMem = Reg(Vec(cfg.nAbstractDataWords*4, UInt(8.W)))
val abstractDataNxt = WireInit(abstractDataMem)
// --- Program Buffer
// byte-addressable mem
val programBufferMem = Reg(Vec(cfg.nProgramBufferWords*4, UInt(8.W)))
val programBufferNxt = WireInit(programBufferMem)
//--------------------------------------------------------------
// These bits are implementation-specific bits set
// by harts executing code.
//--------------------------------------------------------------
// Run control logic
when (~io.dmactive || ~dmAuthenticated) {
haltedBitRegs := 0.U
resumeReqRegs := 0.U
}.otherwise {
//remove those harts in reset
resumeReqRegs := resumeReqRegs & ~(hartIsInResetSync.asUInt)
val hartHaltedIdIndex = UIntToOH(hartSelFuncs.hartIdToHartSel(hartHaltedId))
val hartResumingIdIndex = UIntToOH(hartSelFuncs.hartIdToHartSel(hartResumingId))
val hartselIndex = UIntToOH(io.innerCtrl.bits.hartsel)
when (hartHaltedWrEn) {
// add those harts halting and remove those in reset
haltedBitRegs := (haltedBitRegs | hartHaltedIdIndex) & ~(hartIsInResetSync.asUInt)
}.elsewhen (hartResumingWrEn) {
// remove those harts in reset and those in resume
haltedBitRegs := (haltedBitRegs & ~(hartResumingIdIndex)) & ~(hartIsInResetSync.asUInt)
}.otherwise {
// remove those harts in reset
haltedBitRegs := haltedBitRegs & ~(hartIsInResetSync.asUInt)
}
when (hartResumingWrEn) {
// remove those harts in resume and those in reset
resumeReqRegs := (resumeReqRegs & ~(hartResumingIdIndex)) & ~(hartIsInResetSync.asUInt)
}
when (resumereq) {
// set resumereq for all selected harts, remove those in reset
resumeReqRegs := (resumeReqRegs | hamaskWrSel.asUInt) & ~(hartIsInResetSync.asUInt)
}
}
when (resumereq) {
// next cycle resumeAcks will be the negation of next cycle's resumeReqRegs
resumeAcks := (~resumeReqRegs & ~(hamaskWrSel.asUInt))
}.otherwise {
resumeAcks := ~resumeReqRegs
}
//---- AUTHDATA
val authRdEnMaybe = WireInit(false.B)
val authWrEnMaybe = WireInit(false.B)
io.auth.map { a =>
a.dmactive := io.dmactive
a.dmAuthRead := authRdEnMaybe & ~a.dmAuthBusy
a.dmAuthWrite := authWrEnMaybe & ~a.dmAuthBusy
}
val dmstatusRegFields = RegFieldGroup("dmi_dmstatus", Some("debug module status register"), Seq(
RegField.r(4, DMSTATUSRdData.version, RegFieldDesc("version", "version", reset=Some(2))),
RegField.r(1, DMSTATUSRdData.confstrptrvalid, RegFieldDesc("confstrptrvalid", "confstrptrvalid", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.hasresethaltreq, RegFieldDesc("hasresethaltreq", "hasresethaltreq", reset=Some(1))),
RegField.r(1, DMSTATUSRdData.authbusy, RegFieldDesc("authbusy", "authbusy", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.authenticated, RegFieldDesc("authenticated", "authenticated", reset=Some(1))),
RegField.r(1, DMSTATUSRdData.anyhalted, RegFieldDesc("anyhalted", "anyhalted", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.allhalted, RegFieldDesc("allhalted", "allhalted", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.anyrunning, RegFieldDesc("anyrunning", "anyrunning", reset=Some(1))),
RegField.r(1, DMSTATUSRdData.allrunning, RegFieldDesc("allrunning", "allrunning", reset=Some(1))),
RegField.r(1, DMSTATUSRdData.anyunavail, RegFieldDesc("anyunavail", "anyunavail", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.allunavail, RegFieldDesc("allunavail", "allunavail", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.anynonexistent, RegFieldDesc("anynonexistent", "anynonexistent", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.allnonexistent, RegFieldDesc("allnonexistent", "allnonexistent", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.anyresumeack, RegFieldDesc("anyresumeack", "anyresumeack", reset=Some(1))),
RegField.r(1, DMSTATUSRdData.allresumeack, RegFieldDesc("allresumeack", "allresumeack", reset=Some(1))),
RegField.r(1, DMSTATUSRdData.anyhavereset, RegFieldDesc("anyhavereset", "anyhavereset", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.allhavereset, RegFieldDesc("allhavereset", "allhavereset", reset=Some(0))),
RegField(2),
RegField.r(1, DMSTATUSRdData.impebreak, RegFieldDesc("impebreak", "impebreak", reset=Some(if (cfg.hasImplicitEbreak) 1 else 0)))
))
val dmcs2RegFields = RegFieldGroup("dmi_dmcs2", Some("debug module control/status register 2"), Seq(
WNotifyVal(1, DMCS2RdData.hgselect, DMCS2WrData.hgselect, hgselectWrEn,
RegFieldDesc("hgselect", "select halt groups or external triggers", reset=Some(0), volatile=true)),
WNotifyVal(1, 0.U, DMCS2WrData.hgwrite, hgwriteWrEn,
RegFieldDesc("hgwrite", "write 1 to change halt groups", reset=None, access=RegFieldAccessType.W)),
WNotifyVal(5, DMCS2RdData.haltgroup, DMCS2WrData.haltgroup, haltgroupWrEn,
RegFieldDesc("haltgroup", "halt group", reset=Some(0), volatile=true)),
if (nExtTriggers > 1)
WNotifyVal(4, DMCS2RdData.exttrigger, DMCS2WrData.exttrigger, exttriggerWrEn,
RegFieldDesc("exttrigger", "external trigger select", reset=Some(0), volatile=true))
else RegField(4)
))
val abstractcsRegFields = RegFieldGroup("dmi_abstractcs", Some("abstract command control/status"), Seq(
RegField.r(4, ABSTRACTCSRdData.datacount, RegFieldDesc("datacount", "number of DATA registers", reset=Some(cfg.nAbstractDataWords))),
RegField(4),
WNotifyVal(3, ABSTRACTCSRdData.cmderr, ABSTRACTCSWrData.cmderr, ABSTRACTCSWrEnMaybe,
RegFieldDesc("cmderr", "command error", reset=Some(0), wrType=Some(RegFieldWrType.ONE_TO_CLEAR))),
RegField(1),
RegField.r(1, ABSTRACTCSRdData.busy, RegFieldDesc("busy", "busy", reset=Some(0))),
RegField(11),
RegField.r(5, ABSTRACTCSRdData.progbufsize, RegFieldDesc("progbufsize", "number of PROGBUF registers", reset=Some(cfg.nProgramBufferWords)))
))
val (sbcsFields, sbAddrFields, sbDataFields):
(Seq[RegField], Seq[Seq[RegField]], Seq[Seq[RegField]]) = sb2tlOpt.map{ sb2tl =>
SystemBusAccessModule(sb2tl, io.dmactive, dmAuthenticated)(p)
}.getOrElse((Seq.empty[RegField], Seq.fill[Seq[RegField]](4)(Seq.empty[RegField]), Seq.fill[Seq[RegField]](4)(Seq.empty[RegField])))
//--------------------------------------------------------------
// Program Buffer Access (DMI ... System Bus can override)
//--------------------------------------------------------------
val omRegMap = dmiNode.regmap(
(DMI_DMSTATUS << 2) -> dmstatusRegFields,
//TODO (DMI_CFGSTRADDR0 << 2) -> cfgStrAddrFields,
(DMI_DMCS2 << 2) -> (if (nHaltGroups > 0) dmcs2RegFields else Nil),
(DMI_HALTSUM0 << 2) -> RegFieldGroup("dmi_haltsum0", Some("Halt Summary 0"),
Seq(RegField.r(32, HALTSUM0RdData.asUInt, RegFieldDesc("dmi_haltsum0", "halt summary 0")))),
(DMI_HALTSUM1 << 2) -> RegFieldGroup("dmi_haltsum1", Some("Halt Summary 1"),
Seq(RegField.r(32, HALTSUM1RdData.asUInt, RegFieldDesc("dmi_haltsum1", "halt summary 1")))),
(DMI_ABSTRACTCS << 2) -> abstractcsRegFields,
(DMI_ABSTRACTAUTO<< 2) -> RegFieldGroup("dmi_abstractauto", Some("abstract command autoexec"), Seq(
WNotifyVal(cfg.nAbstractDataWords, ABSTRACTAUTORdData.autoexecdata, ABSTRACTAUTOWrData.autoexecdata, autoexecdataWrEnMaybe,
RegFieldDesc("autoexecdata", "abstract command data autoexec", reset=Some(0))),
RegField(16-cfg.nAbstractDataWords),
WNotifyVal(cfg.nProgramBufferWords, ABSTRACTAUTORdData.autoexecprogbuf, ABSTRACTAUTOWrData.autoexecprogbuf, autoexecprogbufWrEnMaybe,
RegFieldDesc("autoexecprogbuf", "abstract command progbuf autoexec", reset=Some(0))))),
(DMI_COMMAND << 2) -> RegFieldGroup("dmi_command", Some("Abstract Command Register"),
Seq(RWNotify(32, COMMANDRdData.asUInt, COMMANDWrDataVal, COMMANDRdEn, COMMANDWrEnMaybe,
Some(RegFieldDesc("dmi_command", "abstract command register", reset=Some(0), volatile=true))))),
(DMI_DATA0 << 2) -> RegFieldGroup("dmi_data", Some("abstract command data registers"), abstractDataMem.zipWithIndex.map{case (x, i) =>
RWNotify(8, Mux(dmAuthenticated, x, 0.U), abstractDataNxt(i),
dmiAbstractDataRdEn(i),
dmiAbstractDataWrEnMaybe(i),
Some(RegFieldDesc(s"dmi_data_$i", s"abstract command data register $i", reset = Some(0), volatile=true)))}, false),
(DMI_PROGBUF0 << 2) -> RegFieldGroup("dmi_progbuf", Some("abstract command progbuf registers"), programBufferMem.zipWithIndex.map{case (x, i) =>
RWNotify(8, Mux(dmAuthenticated, x, 0.U), programBufferNxt(i),
dmiProgramBufferRdEn(i),
dmiProgramBufferWrEnMaybe(i),
Some(RegFieldDesc(s"dmi_progbuf_$i", s"abstract command progbuf register $i", reset = Some(0))))}, false),
(DMI_AUTHDATA << 2) -> (if (cfg.hasAuthentication) RegFieldGroup("dmi_authdata", Some("authentication data exchange register"),
Seq(RWNotify(32, io.auth.get.dmAuthRdata, io.auth.get.dmAuthWdata, authRdEnMaybe, authWrEnMaybe,
Some(RegFieldDesc("authdata", "authentication data exchange", volatile=true))))) else Nil),
(DMI_SBCS << 2) -> sbcsFields,
(DMI_SBDATA0 << 2) -> sbDataFields(0),
(DMI_SBDATA1 << 2) -> sbDataFields(1),
(DMI_SBDATA2 << 2) -> sbDataFields(2),
(DMI_SBDATA3 << 2) -> sbDataFields(3),
(DMI_SBADDRESS0 << 2) -> sbAddrFields(0),
(DMI_SBADDRESS1 << 2) -> sbAddrFields(1),
(DMI_SBADDRESS2 << 2) -> sbAddrFields(2),
(DMI_SBADDRESS3 << 2) -> sbAddrFields(3)
)
// Abstract data mem is written by both the tile link interface and DMI...
abstractDataMem.zipWithIndex.foreach { case (x, i) =>
when (dmAuthenticated && dmiAbstractDataWrEnMaybe(i) && dmiAbstractDataAccessLegal) {
x := abstractDataNxt(i)
}
}
// ... and also by custom register read (if implemented)
val (customs, customParams) = customNode.in.unzip
val needCustom = (customs.size > 0) && (customParams.head.addrs.size > 0)
def getNeedCustom = () => needCustom
if (needCustom) {
val (custom, customP) = customNode.in.head
require(customP.width % 8 == 0, s"Debug Custom width must be divisible by 8, not ${customP.width}")
val custom_data = custom.data.asBools
val custom_bytes = Seq.tabulate(customP.width/8){i => custom_data.slice(i*8, (i+1)*8).asUInt}
when (custom.ready && custom.valid) {
(abstractDataMem zip custom_bytes).zipWithIndex.foreach {case ((a, b), i) =>
a := b
}
}
}
programBufferMem.zipWithIndex.foreach { case (x, i) =>
when (dmAuthenticated && dmiProgramBufferWrEnMaybe(i) && dmiProgramBufferAccessLegal) {
x := programBufferNxt(i)
}
}
//--------------------------------------------------------------
// "Variable" ROM Generation
//--------------------------------------------------------------
val goReg = Reg(Bool())
val goAbstract = WireInit(false.B)
val goCustom = WireInit(false.B)
val jalAbstract = WireInit(Instructions.JAL.value.U.asTypeOf(new GeneratedUJ()))
jalAbstract.setImm(ABSTRACT(cfg) - WHERETO)
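// WHERETO holds a JAL from the WHERETO location to the generated abstract program.
// goReg drives the selected hart's 'go' flag: set when an abstract program is issued
// (goAbstract) and cleared once the hart reports GOING.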
when (~io.dmactive){
goReg := false.B
}.otherwise {
when (goAbstract) {
goReg := true.B
}.elsewhen (hartGoingWrEn){
assert(hartGoingId === 0.U, "Unexpected 'GOING' hart.")//Chisel3 #540 %x, expected %x", hartGoingId, 0.U)
goReg := false.B
}
}
class flagBundle extends Bundle {
val reserved = UInt(6.W)
val resume = Bool()
val go = Bool()
}
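// Each hart polls its own byte in the FLAGS region of the Debug ROM: 'go' starts
// execution of the abstract program via WHERETO, 'resume' tells the hart to leave
// Debug Mode.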
val flags = WireInit(VecInit(Seq.fill(1 << selectedHartReg.getWidth) {0.U.asTypeOf(new flagBundle())} ))
assert ((hartSelFuncs.hartSelToHartId(selectedHartReg) < flags.size.U),
s"HartSel to HartId Mapping is illegal for this Debug Implementation, because HartID must be < ${flags.size} for it to work.")
flags(hartSelFuncs.hartSelToHartId(selectedHartReg)).go := goReg
for (component <- 0 until nComponents) {
val componentSel = WireInit(component.U)
flags(hartSelFuncs.hartSelToHartId(componentSel)).resume := resumeReqRegs(component)
}
//----------------------------
// Abstract Command Decoding & Generation
//----------------------------
val accessRegisterCommandWr = WireInit(COMMANDWrData.asUInt.asTypeOf(new ACCESS_REGISTERFields()))
/** real COMMAND*/
val accessRegisterCommandReg = WireInit(COMMANDReg.asUInt.asTypeOf(new ACCESS_REGISTERFields()))
// TODO: Quick Access
class GeneratedI extends Bundle {
val imm = UInt(12.W)
val rs1 = UInt(5.W)
val funct3 = UInt(3.W)
val rd = UInt(5.W)
val opcode = UInt(7.W)
}
class GeneratedS extends Bundle {
val immhi = UInt(7.W)
val rs2 = UInt(5.W)
val rs1 = UInt(5.W)
val funct3 = UInt(3.W)
val immlo = UInt(5.W)
val opcode = UInt(7.W)
}
class GeneratedCSR extends Bundle {
val imm = UInt(12.W)
val rs1 = UInt(5.W)
val funct3 = UInt(3.W)
val rd = UInt(5.W)
val opcode = UInt(7.W)
}
class GeneratedUJ extends Bundle {
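// Fields are declared MSB-first to match the scrambled RISC-V J-type immediate:
// imm3 = imm[20], imm0 = imm[10:1], imm1 = imm[11], imm2 = imm[19:12].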
val imm3 = UInt(1.W)
val imm0 = UInt(10.W)
val imm1 = UInt(1.W)
val imm2 = UInt(8.W)
val rd = UInt(5.W)
val opcode = UInt(7.W)
def setImm(imm: Int) : Unit = {
// TODO: Check bounds of imm.
require(imm % 2 == 0, "Immediate must be even for UJ encoding.")
val immWire = WireInit(imm.S(21.W))
val immBits = WireInit(VecInit(immWire.asBools))
imm0 := immBits.slice(1, 1 + 10).asUInt
imm1 := immBits.slice(11, 11 + 11).asUInt
imm2 := immBits.slice(12, 12 + 8).asUInt
imm3 := immBits.slice(20, 20 + 1).asUInt
}
}
require((cfg.atzero && cfg.nAbstractInstructions == 2) || (!cfg.atzero && cfg.nAbstractInstructions == 5),
"Mismatch between DebugModuleParams atzero and nAbstractInstructions")
val abstractGeneratedMem = Reg(Vec(cfg.nAbstractInstructions, (UInt(32.W))))
def abstractGeneratedI(cfg: DebugModuleParams): UInt = {
val inst = Wire(new GeneratedI())
val offset = if (cfg.atzero) DATA else (DATA-0x800) & 0xFFF
val base = if (cfg.atzero) 0.U else Mux(accessRegisterCommandReg.regno(0), 8.U, 9.U)
inst.opcode := (Instructions.LW.value.U.asTypeOf(new GeneratedI())).opcode
inst.rd := (accessRegisterCommandReg.regno & 0x1F.U)
inst.funct3 := accessRegisterCommandReg.size
inst.rs1 := base
inst.imm := offset.U
inst.asUInt
}
def abstractGeneratedS(cfg: DebugModuleParams): UInt = {
val inst = Wire(new GeneratedS())
val offset = if (cfg.atzero) DATA else (DATA-0x800) & 0xFFF
val base = if (cfg.atzero) 0.U else Mux(accessRegisterCommandReg.regno(0), 8.U, 9.U)
inst.opcode := (Instructions.SW.value.U.asTypeOf(new GeneratedS())).opcode
inst.immlo := (offset & 0x1F).U
inst.funct3 := accessRegisterCommandReg.size
inst.rs1 := base
inst.rs2 := (accessRegisterCommandReg.regno & 0x1F.U)
inst.immhi := (offset >> 5).U
inst.asUInt
}
def abstractGeneratedCSR: UInt = {
val inst = Wire(new GeneratedCSR())
val base = Mux(accessRegisterCommandReg.regno(0), 8.U, 9.U) // use s0 as base for odd regs, s1 as base for even regs
inst := (Instructions.CSRRW.value.U.asTypeOf(new GeneratedCSR()))
inst.imm := CSRs.dscratch1.U
inst.rs1 := base
inst.rd := base
inst.asUInt
}
val nop = Wire(new GeneratedI())
nop := Instructions.ADDI.value.U.asTypeOf(new GeneratedI())
nop.rd := 0.U
nop.rs1 := 0.U
nop.imm := 0.U
val isa = Wire(new GeneratedI())
isa := Instructions.ADDIW.value.U.asTypeOf(new GeneratedI())
isa.rd := 0.U
isa.rs1 := 0.U
isa.imm := 0.U
when (goAbstract) {
if (cfg.nAbstractInstructions == 2) {
// ABSTRACT(0): Transfer: LW or SW, else NOP
// ABSTRACT(1): Postexec: NOP else EBREAK
abstractGeneratedMem(0) := Mux(accessRegisterCommandReg.transfer,
Mux(accessRegisterCommandReg.write, abstractGeneratedI(cfg), abstractGeneratedS(cfg)),
nop.asUInt
)
abstractGeneratedMem(1) := Mux(accessRegisterCommandReg.postexec,
nop.asUInt,
Instructions.EBREAK.value.U)
} else {
// Entry: All regs in GPRs, dscratch1=offset 0x800 in DM
// ABSTRACT(0): CheckISA: ADDIW or NOP (raises an exception here if size=3 and not RV64)
// ABSTRACT(1): CSRRW s1,dscratch1,s1 or CSRRW s0,dscratch1,s0
// ABSTRACT(2): Transfer: LW, SW, LD, SD else NOP
// ABSTRACT(3): CSRRW s1,dscratch1,s1 or CSRRW s0,dscratch1,s0
// ABSTRACT(4): Postexec: NOP else EBREAK
abstractGeneratedMem(0) := Mux(accessRegisterCommandReg.transfer && accessRegisterCommandReg.size =/= 2.U, isa.asUInt, nop.asUInt)
abstractGeneratedMem(1) := abstractGeneratedCSR
abstractGeneratedMem(2) := Mux(accessRegisterCommandReg.transfer,
Mux(accessRegisterCommandReg.write, abstractGeneratedI(cfg), abstractGeneratedS(cfg)),
nop.asUInt
)
abstractGeneratedMem(3) := abstractGeneratedCSR
abstractGeneratedMem(4) := Mux(accessRegisterCommandReg.postexec,
nop.asUInt,
Instructions.EBREAK.value.U)
}
}
//--------------------------------------------------------------
// Drive Custom Access
//--------------------------------------------------------------
if (needCustom) {
val (custom, customP) = customNode.in.head
custom.addr := accessRegisterCommandReg.regno
custom.valid := goCustom
}
//--------------------------------------------------------------
// Hart Bus Access
//--------------------------------------------------------------
tlNode.regmap(
// This memory is writable.
HALTED -> Seq(WNotifyWire(sbIdWidth, hartHaltedId, hartHaltedWrEn,
"debug_hart_halted", "Debug ROM Causes hart to write its hartID here when it is in Debug Mode.")),
GOING -> Seq(WNotifyWire(sbIdWidth, hartGoingId, hartGoingWrEn,
"debug_hart_going", "Debug ROM causes hart to write 0 here when it begins executing Debug Mode instructions.")),
RESUMING -> Seq(WNotifyWire(sbIdWidth, hartResumingId, hartResumingWrEn,
"debug_hart_resuming", "Debug ROM causes hart to write its hartID here when it leaves Debug Mode.")),
EXCEPTION -> Seq(WNotifyWire(sbIdWidth, hartExceptionId, hartExceptionWrEn,
"debug_hart_exception", "Debug ROM causes hart to write 0 here if it gets an exception in Debug Mode.")),
DATA -> RegFieldGroup("debug_data", Some("Data used to communicate with Debug Module"),
abstractDataMem.zipWithIndex.map {case (x, i) => RegField(8, x, RegFieldDesc(s"debug_data_$i", ""))}),
PROGBUF(cfg)-> RegFieldGroup("debug_progbuf", Some("Program buffer used to communicate with Debug Module"),
programBufferMem.zipWithIndex.map {case (x, i) => RegField(8, x, RegFieldDesc(s"debug_progbuf_$i", ""))}),
// These sections are read-only.
IMPEBREAK(cfg)-> {if (cfg.hasImplicitEbreak) Seq(RegField.r(32, Instructions.EBREAK.value.U,
RegFieldDesc("debug_impebreak", "Debug Implicit EBREAK", reset=Some(Instructions.EBREAK.value)))) else Nil},
WHERETO -> Seq(RegField.r(32, jalAbstract.asUInt, RegFieldDesc("debug_whereto", "Instruction filled in by Debug Module to control hart in Debug Mode", volatile = true))),
ABSTRACT(cfg) -> RegFieldGroup("debug_abstract", Some("Instructions generated by Debug Module"),
abstractGeneratedMem.zipWithIndex.map{ case (x,i) => RegField.r(32, x, RegFieldDesc(s"debug_abstract_$i", "", volatile=true))}),
FLAGS -> RegFieldGroup("debug_flags", Some("Memory region used to control hart going/resuming in Debug Mode"),
if (nComponents == 1) {
Seq.tabulate(1024) { i => RegField.r(8, flags(0).asUInt, RegFieldDesc(s"debug_flags_$i", "", volatile=true)) }
} else {
flags.zipWithIndex.map{case(x, i) => RegField.r(8, x.asUInt, RegFieldDesc(s"debug_flags_$i", "", volatile=true))}
}),
ROMBASE -> RegFieldGroup("debug_rom", Some("Debug ROM"),
(if (cfg.atzero) DebugRomContents() else DebugRomNonzeroContents()).zipWithIndex.map{case (x, i) =>
RegField.r(8, (x & 0xFF).U(8.W), RegFieldDesc(s"debug_rom_$i", "", reset=Some(x)))})
)
// Override System Bus accesses with dmactive reset.
when (~io.dmactive){
abstractDataMem.foreach {x => x := 0.U}
programBufferMem.foreach {x => x := 0.U}
}
//--------------------------------------------------------------
// Abstract Command State Machine
//--------------------------------------------------------------
object CtrlState extends scala.Enumeration {
type CtrlState = Value
val Waiting, CheckGenerate, Exec, Custom = Value
def apply( t : Value) : UInt = {
t.id.U(log2Up(values.size).W)
}
}
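// Waiting: idle; DMI accesses to COMMAND, ABSTRACTAUTO, data and progbuf are legal.
// CheckGenerate: one cycle to validate the registered COMMAND and generate instructions.
// Exec: the abstract program is running; wait for the hart to write HALTED (or EXCEPTION).
// Custom: wait for the custom register access to complete.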
import CtrlState._
// This is not an initialization!
val ctrlStateReg = Reg(chiselTypeOf(CtrlState(Waiting)))
val hartHalted = haltedBitRegs(if (nComponents == 1) 0.U(0.W) else selectedHartReg)
val ctrlStateNxt = WireInit(ctrlStateReg)
//------------------------
// DMI Register Control and Status
abstractCommandBusy := (ctrlStateReg =/= CtrlState(Waiting))
ABSTRACTCSWrEnLegal := (ctrlStateReg === CtrlState(Waiting))
COMMANDWrEnLegal := (ctrlStateReg === CtrlState(Waiting))
ABSTRACTAUTOWrEnLegal := (ctrlStateReg === CtrlState(Waiting))
dmiAbstractDataAccessLegal := (ctrlStateReg === CtrlState(Waiting))
dmiProgramBufferAccessLegal := (ctrlStateReg === CtrlState(Waiting))
errorBusy := (ABSTRACTCSWrEnMaybe && ~ABSTRACTCSWrEnLegal) ||
(autoexecdataWrEnMaybe && ~ABSTRACTAUTOWrEnLegal) ||
(autoexecprogbufWrEnMaybe && ~ABSTRACTAUTOWrEnLegal) ||
(COMMANDWrEnMaybe && ~COMMANDWrEnLegal) ||
(dmiAbstractDataAccess && ~dmiAbstractDataAccessLegal) ||
(dmiProgramBufferAccess && ~dmiProgramBufferAccessLegal)
// TODO: Maybe Quick Access
val commandWrIsAccessRegister = (COMMANDWrData.cmdtype === DebugAbstractCommandType.AccessRegister.id.U)
val commandRegIsAccessRegister = (COMMANDReg.cmdtype === DebugAbstractCommandType.AccessRegister.id.U)
val commandWrIsUnsupported = COMMANDWrEn && !commandWrIsAccessRegister
val commandRegIsUnsupported = WireInit(true.B)
val commandRegBadHaltResume = WireInit(false.B)
// We only support abstract commands for GPRs and any custom registers, if specified.
val accessRegIsLegalSize = (accessRegisterCommandReg.size === 2.U) || (accessRegisterCommandReg.size === 3.U)
val accessRegIsGPR = (accessRegisterCommandReg.regno >= 0x1000.U && accessRegisterCommandReg.regno <= 0x101F.U) && accessRegIsLegalSize
val accessRegIsCustom = if (needCustom) {
val (custom, customP) = customNode.in.head
customP.addrs.foldLeft(false.B){
(result, current) => result || (current.U === accessRegisterCommandReg.regno)}
} else false.B
when (commandRegIsAccessRegister) {
when (accessRegIsCustom && accessRegisterCommandReg.transfer && accessRegisterCommandReg.write === false.B) {
commandRegIsUnsupported := false.B
}.elsewhen (!accessRegisterCommandReg.transfer || accessRegIsGPR) {
commandRegIsUnsupported := false.B
commandRegBadHaltResume := ~hartHalted
}
}
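// A new AccessRegister command is launched either by an explicit COMMAND write or by
// autoexec, and only when cmderr is already clear.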
val wrAccessRegisterCommand = COMMANDWrEn && commandWrIsAccessRegister && (ABSTRACTCSReg.cmderr === 0.U)
val regAccessRegisterCommand = autoexec && commandRegIsAccessRegister && (ABSTRACTCSReg.cmderr === 0.U)
//------------------------
// Variable ROM STATE MACHINE
// -----------------------
when (ctrlStateReg === CtrlState(Waiting)){
when (wrAccessRegisterCommand || regAccessRegisterCommand) {
ctrlStateNxt := CtrlState(CheckGenerate)
}.elsewhen (commandWrIsUnsupported) { // These checks are really on the command type.
errorUnsupported := true.B
}.elsewhen (autoexec && commandRegIsUnsupported) {
errorUnsupported := true.B
}
}.elsewhen (ctrlStateReg === CtrlState(CheckGenerate)){
// We use this state to ensure that the COMMAND has been
// registered by the time that we need to use it, to avoid
// generating it directly from the COMMANDWrData.
// This 'commandRegIsUnsupported' is really just checking the
// AccessRegisterCommand parameters (regno)
when (commandRegIsUnsupported) {
errorUnsupported := true.B
ctrlStateNxt := CtrlState(Waiting)
}.elsewhen (commandRegBadHaltResume){
errorHaltResume := true.B
ctrlStateNxt := CtrlState(Waiting)
}.otherwise {
when(accessRegIsCustom) {
ctrlStateNxt := CtrlState(Custom)
}.otherwise {
ctrlStateNxt := CtrlState(Exec)
goAbstract := true.B
}
}
}.elsewhen (ctrlStateReg === CtrlState(Exec)) {
// We can't just look at 'hartHalted' here, because
// hartHaltedWrEn is overloaded to mean 'got an ebreak'
// which may have happened when we were already halted.
when(goReg === false.B && hartHaltedWrEn && (hartSelFuncs.hartIdToHartSel(hartHaltedId) === selectedHartReg)){
ctrlStateNxt := CtrlState(Waiting)
}
when(hartExceptionWrEn) {
assert(hartExceptionId === 0.U, "Unexpected 'EXCEPTION' hart")//Chisel3 #540, %x, expected %x", hartExceptionId, 0.U)
ctrlStateNxt := CtrlState(Waiting)
errorException := true.B
}
}.elsewhen (ctrlStateReg === CtrlState(Custom)) {
assert(needCustom.B, "Should not be in custom state unless we need it.")
goCustom := true.B
val (custom, customP) = customNode.in.head
when (custom.ready && custom.valid) {
ctrlStateNxt := CtrlState(Waiting)
}
}
when (~io.dmactive || ~dmAuthenticated) {
ctrlStateReg := CtrlState(Waiting)
}.otherwise {
ctrlStateReg := ctrlStateNxt
}
assert ((!io.dmactive || !hartExceptionWrEn || ctrlStateReg === CtrlState(Exec)),
"Unexpected EXCEPTION write: should only get it in Debug Module EXEC state")
}
}
// Wrapper around TL Debug Module Inner and an Async DMI Sink interface.
// Handles the synchronization of dmactive, which is used as a synchronous reset
// inside the Inner block.
// Also is the Sink side of hartsel & resumereq fields of DMCONTROL.
class TLDebugModuleInnerAsync(device: Device, getNComponents: () => Int, beatBytes: Int)(implicit p: Parameters) extends LazyModule{
val cfg = p(DebugModuleKey).get
val dmInner = LazyModule(new TLDebugModuleInner(device, getNComponents, beatBytes))
val dmiXing = LazyModule(new TLAsyncCrossingSink(AsyncQueueParams.singleton(safe=cfg.crossingHasSafeReset)))
val dmiNode = dmiXing.node
val tlNode = dmInner.tlNode
dmInner.dmiNode := dmiXing.node
// Require that there are no registers in TL interface, so that spurious
// processor accesses to the DM don't need to enable the clock. We don't
// require this property of the SBA, because the debugger is responsible for
// raising dmactive (hence enabling the clock) during these transactions.
require(dmInner.tlNode.concurrency == 0)
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
// Clock/reset domains:
// debug_clock / debug_reset = Debug inner domain
// tl_clock / tl_reset = tilelink domain (External: clock / reset)
//
val io = IO(new Bundle {
val debug_clock = Input(Clock())
val debug_reset = Input(Reset())
val tl_clock = Input(Clock())
val tl_reset = Input(Reset())
// These are all asynchronous and come from Outer
/** reset signal for DM */
val dmactive = Input(Bool())
/** control signals for Inner
*
* generated in Outer
*/
val innerCtrl = Flipped(new AsyncBundle(new DebugInternalBundle(getNComponents()), AsyncQueueParams.singleton(safe=cfg.crossingHasSafeReset)))
// This comes from tlClk domain.
/** debug available status */
val debugUnavail = Input(Vec(getNComponents(), Bool()))
/** debug interruption*/
val hgDebugInt = Output(Vec(getNComponents(), Bool()))
val extTrigger = (p(DebugModuleKey).get.nExtTriggers > 0).option(new DebugExtTriggerIO())
/** vector to indicate which hart is in reset
*
* dm receives it from core and sends it to Inner
*/
val hartIsInReset = Input(Vec(getNComponents(), Bool()))
/** Debug Authentication signals from core */
val auth = p(DebugModuleKey).get.hasAuthentication.option(new DebugAuthenticationIO())
})
val rf_reset = IO(Input(Reset())) // RF transform
childClock := io.debug_clock
childReset := io.debug_reset
override def provideImplicitClockToLazyChildren = true
val dmactive_synced = withClockAndReset(childClock, childReset) {
val dmactive_synced = AsyncResetSynchronizerShiftReg(in=io.dmactive, sync=3, name=Some("dmactiveSync"))
dmInner.module.clock := io.debug_clock
dmInner.module.reset := io.debug_reset
dmInner.module.io.tl_clock := io.tl_clock
dmInner.module.io.tl_reset := io.tl_reset
dmInner.module.io.dmactive := dmactive_synced
dmInner.module.io.innerCtrl <> FromAsyncBundle(io.innerCtrl)
dmInner.module.io.debugUnavail := io.debugUnavail
io.hgDebugInt := dmInner.module.io.hgDebugInt
io.extTrigger.foreach { x => dmInner.module.io.extTrigger.foreach {y => x <> y}}
dmInner.module.io.hartIsInReset := io.hartIsInReset
io.auth.foreach { x => dmInner.module.io.auth.foreach {y => x <> y}}
dmactive_synced
}
}
}
/** Create a version of the TLDebugModule which includes a synchronization interface
* internally for the DMI. This is no longer optional outside of this module
* because the Clock must run when tl_clock isn't running or tl_reset is asserted.
*/
class TLDebugModule(beatBytes: Int)(implicit p: Parameters) extends LazyModule {
val device = new SimpleDevice("debug-controller", Seq("sifive,debug-013","riscv,debug-013")){
override val alwaysExtended = true
override def describe(resources: ResourceBindings): Description = {
val Description(name, mapping) = super.describe(resources)
val attach = Map(
"debug-attach" -> (
(if (p(ExportDebug).apb) Seq(ResourceString("apb")) else Seq()) ++
(if (p(ExportDebug).jtag) Seq(ResourceString("jtag")) else Seq()) ++
(if (p(ExportDebug).cjtag) Seq(ResourceString("cjtag")) else Seq()) ++
(if (p(ExportDebug).dmi) Seq(ResourceString("dmi")) else Seq())))
Description(name, mapping ++ attach)
}
}
val dmOuter : TLDebugModuleOuterAsync = LazyModule(new TLDebugModuleOuterAsync(device)(p))
val dmInner : TLDebugModuleInnerAsync = LazyModule(new TLDebugModuleInnerAsync(device, () => {dmOuter.dmOuter.intnode.edges.out.size}, beatBytes)(p))
val node = dmInner.tlNode
val intnode = dmOuter.intnode
val apbNodeOpt = dmOuter.apbNodeOpt
dmInner.dmiNode := dmOuter.dmiInnerNode
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val nComponents = dmOuter.dmOuter.intnode.edges.out.size
// Clock/reset domains:
// tl_clock / tl_reset = tilelink domain
// debug_clock / debug_reset = Inner debug (synchronous to tl_clock)
// apb_clock / apb_reset = Outer debug with APB
// dmiClock / dmiReset = Outer debug without APB
//
val io = IO(new Bundle {
val debug_clock = Input(Clock())
val debug_reset = Input(Reset())
val tl_clock = Input(Clock())
val tl_reset = Input(Reset())
/** Debug control signals generated in Outer */
val ctrl = new DebugCtrlBundle(nComponents)
/** Debug Module Interface between DM and DTM
*
* The DTM provides access to one or more Debug Modules (DMs) using DMI
*/
val dmi = (!p(ExportDebug).apb).option(Flipped(new ClockedDMIIO()))
val apb_clock = p(ExportDebug).apb.option(Input(Clock()))
val apb_reset = p(ExportDebug).apb.option(Input(Reset()))
val extTrigger = (p(DebugModuleKey).get.nExtTriggers > 0).option(new DebugExtTriggerIO())
/** vector to indicate which hart is in reset
*
* dm receives it from core and sends it to Inner
*/
val hartIsInReset = Input(Vec(nComponents, Bool()))
/** hart reset request generated by hartreset-logic in Outer */
val hartResetReq = p(DebugModuleKey).get.hasHartResets.option(Output(Vec(nComponents, Bool())))
/** Debug Authentication signals from core */
val auth = p(DebugModuleKey).get.hasAuthentication.option(new DebugAuthenticationIO())
})
childClock := io.tl_clock
childReset := io.tl_reset
override def provideImplicitClockToLazyChildren = true
dmOuter.module.io.dmi.foreach { dmOuterDMI =>
dmOuterDMI <> io.dmi.get.dmi
dmOuter.module.io.dmi_reset := io.dmi.get.dmiReset
dmOuter.module.io.dmi_clock := io.dmi.get.dmiClock
dmOuter.module.rf_reset := io.dmi.get.dmiReset
}
(io.apb_clock zip io.apb_reset) foreach { case (c, r) =>
dmOuter.module.io.dmi_reset := r
dmOuter.module.io.dmi_clock := c
dmOuter.module.rf_reset := r
}
dmInner.module.rf_reset := io.debug_reset
dmInner.module.io.debug_clock := io.debug_clock
dmInner.module.io.debug_reset := io.debug_reset
dmInner.module.io.tl_clock := io.tl_clock
dmInner.module.io.tl_reset := io.tl_reset
dmInner.module.io.innerCtrl <> dmOuter.module.io.innerCtrl
dmInner.module.io.dmactive := dmOuter.module.io.ctrl.dmactive
dmInner.module.io.debugUnavail := io.ctrl.debugUnavail
dmOuter.module.io.hgDebugInt := dmInner.module.io.hgDebugInt
io.ctrl <> dmOuter.module.io.ctrl
io.extTrigger.foreach { x => dmInner.module.io.extTrigger.foreach {y => x <> y}}
dmInner.module.io.hartIsInReset := io.hartIsInReset
io.hartResetReq.foreach { x => dmOuter.module.io.hartResetReq.foreach {y => x := y}}
io.auth.foreach { x => dmOuter.module.io.dmAuthenticated.get := x.dmAuthenticated }
io.auth.foreach { x => dmInner.module.io.auth.foreach {y => x <> y}}
}
}
| module TLDebugModuleInnerAsync( // @[Debug.scala:1871:9]
input [2:0] auto_dmiXing_in_a_mem_0_opcode, // @[LazyModuleImp.scala:107:25]
input [8:0] auto_dmiXing_in_a_mem_0_address, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_dmiXing_in_a_mem_0_data, // @[LazyModuleImp.scala:107:25]
output auto_dmiXing_in_a_ridx, // @[LazyModuleImp.scala:107:25]
input auto_dmiXing_in_a_widx, // @[LazyModuleImp.scala:107:25]
output auto_dmiXing_in_a_safe_ridx_valid, // @[LazyModuleImp.scala:107:25]
input auto_dmiXing_in_a_safe_widx_valid, // @[LazyModuleImp.scala:107:25]
input auto_dmiXing_in_a_safe_source_reset_n, // @[LazyModuleImp.scala:107:25]
output auto_dmiXing_in_a_safe_sink_reset_n, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_dmiXing_in_d_mem_0_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_dmiXing_in_d_mem_0_size, // @[LazyModuleImp.scala:107:25]
output auto_dmiXing_in_d_mem_0_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_dmiXing_in_d_mem_0_data, // @[LazyModuleImp.scala:107:25]
input auto_dmiXing_in_d_ridx, // @[LazyModuleImp.scala:107:25]
output auto_dmiXing_in_d_widx, // @[LazyModuleImp.scala:107:25]
input auto_dmiXing_in_d_safe_ridx_valid, // @[LazyModuleImp.scala:107:25]
output auto_dmiXing_in_d_safe_widx_valid, // @[LazyModuleImp.scala:107:25]
output auto_dmiXing_in_d_safe_source_reset_n, // @[LazyModuleImp.scala:107:25]
input auto_dmiXing_in_d_safe_sink_reset_n, // @[LazyModuleImp.scala:107:25]
input auto_dmInner_sb2tlOpt_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_dmInner_sb2tlOpt_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_dmInner_sb2tlOpt_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_dmInner_sb2tlOpt_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_dmInner_sb2tlOpt_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_dmInner_sb2tlOpt_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_dmInner_sb2tlOpt_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_dmInner_sb2tlOpt_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dmInner_sb2tlOpt_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dmInner_sb2tlOpt_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dmInner_sb2tlOpt_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dmInner_sb2tlOpt_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_dmInner_sb2tlOpt_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_dmInner_sb2tlOpt_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_dmInner_sb2tlOpt_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_dmInner_tl_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_dmInner_tl_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dmInner_tl_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dmInner_tl_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dmInner_tl_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [10:0] auto_dmInner_tl_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [11:0] auto_dmInner_tl_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_dmInner_tl_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_dmInner_tl_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_dmInner_tl_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_dmInner_tl_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_dmInner_tl_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_dmInner_tl_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_dmInner_tl_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [10:0] auto_dmInner_tl_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_dmInner_tl_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
input io_debug_clock, // @[Debug.scala:1877:16]
input io_debug_reset, // @[Debug.scala:1877:16]
input io_tl_clock, // @[Debug.scala:1877:16]
input io_tl_reset, // @[Debug.scala:1877:16]
input io_dmactive, // @[Debug.scala:1877:16]
input io_innerCtrl_mem_0_resumereq, // @[Debug.scala:1877:16]
input [9:0] io_innerCtrl_mem_0_hartsel, // @[Debug.scala:1877:16]
input io_innerCtrl_mem_0_ackhavereset, // @[Debug.scala:1877:16]
input io_innerCtrl_mem_0_hrmask_0, // @[Debug.scala:1877:16]
output io_innerCtrl_ridx, // @[Debug.scala:1877:16]
input io_innerCtrl_widx, // @[Debug.scala:1877:16]
output io_innerCtrl_safe_ridx_valid, // @[Debug.scala:1877:16]
input io_innerCtrl_safe_widx_valid, // @[Debug.scala:1877:16]
input io_innerCtrl_safe_source_reset_n, // @[Debug.scala:1877:16]
output io_innerCtrl_safe_sink_reset_n, // @[Debug.scala:1877:16]
output io_hgDebugInt_0, // @[Debug.scala:1877:16]
input io_hartIsInReset_0 // @[Debug.scala:1877:16]
);
wire _dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_valid; // @[AsyncQueue.scala:211:22]
wire _dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_resumereq; // @[AsyncQueue.scala:211:22]
wire [9:0] _dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hartsel; // @[AsyncQueue.scala:211:22]
wire _dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_ackhavereset; // @[AsyncQueue.scala:211:22]
wire _dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hrmask_0; // @[AsyncQueue.scala:211:22]
wire _dmactive_synced_dmactive_synced_dmactiveSync_io_q; // @[ShiftReg.scala:45:23]
wire _dmiXing_auto_out_a_valid; // @[Debug.scala:1858:27]
wire [2:0] _dmiXing_auto_out_a_bits_opcode; // @[Debug.scala:1858:27]
wire [2:0] _dmiXing_auto_out_a_bits_param; // @[Debug.scala:1858:27]
wire [1:0] _dmiXing_auto_out_a_bits_size; // @[Debug.scala:1858:27]
wire _dmiXing_auto_out_a_bits_source; // @[Debug.scala:1858:27]
wire [8:0] _dmiXing_auto_out_a_bits_address; // @[Debug.scala:1858:27]
wire [3:0] _dmiXing_auto_out_a_bits_mask; // @[Debug.scala:1858:27]
wire [31:0] _dmiXing_auto_out_a_bits_data; // @[Debug.scala:1858:27]
wire _dmiXing_auto_out_a_bits_corrupt; // @[Debug.scala:1858:27]
wire _dmiXing_auto_out_d_ready; // @[Debug.scala:1858:27]
wire _dmInner_auto_dmi_in_a_ready; // @[Debug.scala:1857:27]
wire _dmInner_auto_dmi_in_d_valid; // @[Debug.scala:1857:27]
wire [2:0] _dmInner_auto_dmi_in_d_bits_opcode; // @[Debug.scala:1857:27]
wire [1:0] _dmInner_auto_dmi_in_d_bits_size; // @[Debug.scala:1857:27]
wire _dmInner_auto_dmi_in_d_bits_source; // @[Debug.scala:1857:27]
wire [31:0] _dmInner_auto_dmi_in_d_bits_data; // @[Debug.scala:1857:27]
TLDebugModuleInner dmInner ( // @[Debug.scala:1857:27]
.clock (io_debug_clock),
.reset (io_debug_reset),
.auto_sb2tlOpt_out_a_ready (auto_dmInner_sb2tlOpt_out_a_ready),
.auto_sb2tlOpt_out_a_valid (auto_dmInner_sb2tlOpt_out_a_valid),
.auto_sb2tlOpt_out_a_bits_opcode (auto_dmInner_sb2tlOpt_out_a_bits_opcode),
.auto_sb2tlOpt_out_a_bits_size (auto_dmInner_sb2tlOpt_out_a_bits_size),
.auto_sb2tlOpt_out_a_bits_address (auto_dmInner_sb2tlOpt_out_a_bits_address),
.auto_sb2tlOpt_out_a_bits_data (auto_dmInner_sb2tlOpt_out_a_bits_data),
.auto_sb2tlOpt_out_d_ready (auto_dmInner_sb2tlOpt_out_d_ready),
.auto_sb2tlOpt_out_d_valid (auto_dmInner_sb2tlOpt_out_d_valid),
.auto_sb2tlOpt_out_d_bits_opcode (auto_dmInner_sb2tlOpt_out_d_bits_opcode),
.auto_sb2tlOpt_out_d_bits_param (auto_dmInner_sb2tlOpt_out_d_bits_param),
.auto_sb2tlOpt_out_d_bits_size (auto_dmInner_sb2tlOpt_out_d_bits_size),
.auto_sb2tlOpt_out_d_bits_sink (auto_dmInner_sb2tlOpt_out_d_bits_sink),
.auto_sb2tlOpt_out_d_bits_denied (auto_dmInner_sb2tlOpt_out_d_bits_denied),
.auto_sb2tlOpt_out_d_bits_data (auto_dmInner_sb2tlOpt_out_d_bits_data),
.auto_sb2tlOpt_out_d_bits_corrupt (auto_dmInner_sb2tlOpt_out_d_bits_corrupt),
.auto_tl_in_a_ready (auto_dmInner_tl_in_a_ready),
.auto_tl_in_a_valid (auto_dmInner_tl_in_a_valid),
.auto_tl_in_a_bits_opcode (auto_dmInner_tl_in_a_bits_opcode),
.auto_tl_in_a_bits_param (auto_dmInner_tl_in_a_bits_param),
.auto_tl_in_a_bits_size (auto_dmInner_tl_in_a_bits_size),
.auto_tl_in_a_bits_source (auto_dmInner_tl_in_a_bits_source),
.auto_tl_in_a_bits_address (auto_dmInner_tl_in_a_bits_address),
.auto_tl_in_a_bits_mask (auto_dmInner_tl_in_a_bits_mask),
.auto_tl_in_a_bits_data (auto_dmInner_tl_in_a_bits_data),
.auto_tl_in_a_bits_corrupt (auto_dmInner_tl_in_a_bits_corrupt),
.auto_tl_in_d_ready (auto_dmInner_tl_in_d_ready),
.auto_tl_in_d_valid (auto_dmInner_tl_in_d_valid),
.auto_tl_in_d_bits_opcode (auto_dmInner_tl_in_d_bits_opcode),
.auto_tl_in_d_bits_size (auto_dmInner_tl_in_d_bits_size),
.auto_tl_in_d_bits_source (auto_dmInner_tl_in_d_bits_source),
.auto_tl_in_d_bits_data (auto_dmInner_tl_in_d_bits_data),
.auto_dmi_in_a_ready (_dmInner_auto_dmi_in_a_ready),
.auto_dmi_in_a_valid (_dmiXing_auto_out_a_valid), // @[Debug.scala:1858:27]
.auto_dmi_in_a_bits_opcode (_dmiXing_auto_out_a_bits_opcode), // @[Debug.scala:1858:27]
.auto_dmi_in_a_bits_param (_dmiXing_auto_out_a_bits_param), // @[Debug.scala:1858:27]
.auto_dmi_in_a_bits_size (_dmiXing_auto_out_a_bits_size), // @[Debug.scala:1858:27]
.auto_dmi_in_a_bits_source (_dmiXing_auto_out_a_bits_source), // @[Debug.scala:1858:27]
.auto_dmi_in_a_bits_address (_dmiXing_auto_out_a_bits_address), // @[Debug.scala:1858:27]
.auto_dmi_in_a_bits_mask (_dmiXing_auto_out_a_bits_mask), // @[Debug.scala:1858:27]
.auto_dmi_in_a_bits_data (_dmiXing_auto_out_a_bits_data), // @[Debug.scala:1858:27]
.auto_dmi_in_a_bits_corrupt (_dmiXing_auto_out_a_bits_corrupt), // @[Debug.scala:1858:27]
.auto_dmi_in_d_ready (_dmiXing_auto_out_d_ready), // @[Debug.scala:1858:27]
.auto_dmi_in_d_valid (_dmInner_auto_dmi_in_d_valid),
.auto_dmi_in_d_bits_opcode (_dmInner_auto_dmi_in_d_bits_opcode),
.auto_dmi_in_d_bits_size (_dmInner_auto_dmi_in_d_bits_size),
.auto_dmi_in_d_bits_source (_dmInner_auto_dmi_in_d_bits_source),
.auto_dmi_in_d_bits_data (_dmInner_auto_dmi_in_d_bits_data),
.io_dmactive (_dmactive_synced_dmactive_synced_dmactiveSync_io_q), // @[ShiftReg.scala:45:23]
.io_innerCtrl_valid (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_valid), // @[AsyncQueue.scala:211:22]
.io_innerCtrl_bits_resumereq (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_resumereq), // @[AsyncQueue.scala:211:22]
.io_innerCtrl_bits_hartsel (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hartsel), // @[AsyncQueue.scala:211:22]
.io_innerCtrl_bits_ackhavereset (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_ackhavereset), // @[AsyncQueue.scala:211:22]
.io_innerCtrl_bits_hrmask_0 (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hrmask_0), // @[AsyncQueue.scala:211:22]
.io_hgDebugInt_0 (io_hgDebugInt_0),
.io_hartIsInReset_0 (io_hartIsInReset_0),
.io_tl_clock (io_tl_clock),
.io_tl_reset (io_tl_reset)
); // @[Debug.scala:1857:27]
TLAsyncCrossingSink_a9d32s1k1z2u dmiXing ( // @[Debug.scala:1858:27]
.clock (io_debug_clock),
.reset (io_debug_reset),
.auto_in_a_mem_0_opcode (auto_dmiXing_in_a_mem_0_opcode),
.auto_in_a_mem_0_address (auto_dmiXing_in_a_mem_0_address),
.auto_in_a_mem_0_data (auto_dmiXing_in_a_mem_0_data),
.auto_in_a_ridx (auto_dmiXing_in_a_ridx),
.auto_in_a_widx (auto_dmiXing_in_a_widx),
.auto_in_a_safe_ridx_valid (auto_dmiXing_in_a_safe_ridx_valid),
.auto_in_a_safe_widx_valid (auto_dmiXing_in_a_safe_widx_valid),
.auto_in_a_safe_source_reset_n (auto_dmiXing_in_a_safe_source_reset_n),
.auto_in_a_safe_sink_reset_n (auto_dmiXing_in_a_safe_sink_reset_n),
.auto_in_d_mem_0_opcode (auto_dmiXing_in_d_mem_0_opcode),
.auto_in_d_mem_0_size (auto_dmiXing_in_d_mem_0_size),
.auto_in_d_mem_0_source (auto_dmiXing_in_d_mem_0_source),
.auto_in_d_mem_0_data (auto_dmiXing_in_d_mem_0_data),
.auto_in_d_ridx (auto_dmiXing_in_d_ridx),
.auto_in_d_widx (auto_dmiXing_in_d_widx),
.auto_in_d_safe_ridx_valid (auto_dmiXing_in_d_safe_ridx_valid),
.auto_in_d_safe_widx_valid (auto_dmiXing_in_d_safe_widx_valid),
.auto_in_d_safe_source_reset_n (auto_dmiXing_in_d_safe_source_reset_n),
.auto_in_d_safe_sink_reset_n (auto_dmiXing_in_d_safe_sink_reset_n),
.auto_out_a_ready (_dmInner_auto_dmi_in_a_ready), // @[Debug.scala:1857:27]
.auto_out_a_valid (_dmiXing_auto_out_a_valid),
.auto_out_a_bits_opcode (_dmiXing_auto_out_a_bits_opcode),
.auto_out_a_bits_param (_dmiXing_auto_out_a_bits_param),
.auto_out_a_bits_size (_dmiXing_auto_out_a_bits_size),
.auto_out_a_bits_source (_dmiXing_auto_out_a_bits_source),
.auto_out_a_bits_address (_dmiXing_auto_out_a_bits_address),
.auto_out_a_bits_mask (_dmiXing_auto_out_a_bits_mask),
.auto_out_a_bits_data (_dmiXing_auto_out_a_bits_data),
.auto_out_a_bits_corrupt (_dmiXing_auto_out_a_bits_corrupt),
.auto_out_d_ready (_dmiXing_auto_out_d_ready),
.auto_out_d_valid (_dmInner_auto_dmi_in_d_valid), // @[Debug.scala:1857:27]
.auto_out_d_bits_opcode (_dmInner_auto_dmi_in_d_bits_opcode), // @[Debug.scala:1857:27]
.auto_out_d_bits_size (_dmInner_auto_dmi_in_d_bits_size), // @[Debug.scala:1857:27]
.auto_out_d_bits_source (_dmInner_auto_dmi_in_d_bits_source), // @[Debug.scala:1857:27]
.auto_out_d_bits_data (_dmInner_auto_dmi_in_d_bits_data) // @[Debug.scala:1857:27]
); // @[Debug.scala:1858:27]
AsyncResetSynchronizerShiftReg_w1_d3_i0 dmactive_synced_dmactive_synced_dmactiveSync ( // @[ShiftReg.scala:45:23]
.clock (io_debug_clock),
.reset (io_debug_reset),
.io_d (io_dmactive),
.io_q (_dmactive_synced_dmactive_synced_dmactiveSync_io_q)
); // @[ShiftReg.scala:45:23]
AsyncQueueSink_DebugInternalBundle dmactive_synced_dmInner_io_innerCtrl_sink ( // @[AsyncQueue.scala:211:22]
.clock (io_debug_clock),
.reset (io_debug_reset),
.io_deq_valid (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_valid),
.io_deq_bits_resumereq (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_resumereq),
.io_deq_bits_hartsel (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hartsel),
.io_deq_bits_ackhavereset (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_ackhavereset),
.io_deq_bits_hrmask_0 (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hrmask_0),
.io_async_mem_0_resumereq (io_innerCtrl_mem_0_resumereq),
.io_async_mem_0_hartsel (io_innerCtrl_mem_0_hartsel),
.io_async_mem_0_ackhavereset (io_innerCtrl_mem_0_ackhavereset),
.io_async_mem_0_hrmask_0 (io_innerCtrl_mem_0_hrmask_0),
.io_async_ridx (io_innerCtrl_ridx),
.io_async_widx (io_innerCtrl_widx),
.io_async_safe_ridx_valid (io_innerCtrl_safe_ridx_valid),
.io_async_safe_widx_valid (io_innerCtrl_safe_widx_valid),
.io_async_safe_source_reset_n (io_innerCtrl_safe_source_reset_n),
.io_async_safe_sink_reset_n (io_innerCtrl_safe_sink_reset_n)
); // @[AsyncQueue.scala:211:22]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
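// A minimal usage sketch (not part of the original Misc.scala): DecoupledHelper
// collects every ready/valid term of a multi-way handshake so each side can drive
// its own control signal from fire(<its own term>), i.e. the AND of all the other
// terms. The signal names below are hypothetical.
class DecoupledHelperExample extends Module {
  val io = IO(new Bundle {
    val enqA = Flipped(Decoupled(UInt(8.W)))
    val enqB = Flipped(Decoupled(UInt(8.W)))
    val deq  = Decoupled(UInt(8.W))
  })
  // The transfer happens only when both producers and the consumer agree.
  val helper = DecoupledHelper(io.enqA.valid, io.enqB.valid, io.deq.ready)
  io.enqA.ready := helper.fire(io.enqA.valid) // exclude our own valid
  io.enqB.ready := helper.fire(io.enqB.valid)
  io.deq.valid  := helper.fire(io.deq.ready)  // exclude the consumer's ready
  io.deq.bits   := io.enqA.bits + io.enqB.bits
}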
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
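// A minimal usage sketch (not part of the original Misc.scala): MuxTLookup selects
// a tuple of values by key, falling back to the default when nothing matches. The
// key values and port names here are hypothetical.
class MuxTLookupExample extends Module {
  val io = IO(new Bundle {
    val sel   = Input(UInt(2.W))
    val shift = Output(UInt(4.W))
    val wen   = Output(Bool())
  })
  val (shift, wen) = MuxTLookup(io.sel, (0.U(4.W), false.B), Seq(
    (1.U, (4.U(4.W), true.B)),
    (2.U, (8.U(4.W), true.B))))
  io.shift := shift
  io.wen   := wen
}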
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
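// A minimal usage sketch (not part of the original Misc.scala): for three inputs,
// Majority reduces to the familiar carry-style vote (a && b) || (a && c) || (b && c).
class MajorityExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(3.W))
    val out = Output(Bool())
  })
  io.out := Majority(io.in) // true when at least two of the three bits are set
}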
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
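// A minimal usage sketch (not part of the original Misc.scala): PopCountAtLeast
// answers "are at least n bits set?" without a full population count for small n.
class PopCountAtLeastExample extends Module {
  val io = IO(new Bundle {
    val in         = Input(UInt(8.W))
    val atLeastTwo = Output(Bool())
  })
  io.atLeastTwo := PopCountAtLeast(io.in, 2)
}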
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
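// A minimal usage sketch (not part of the original Misc.scala): a thin wrapper
// exposing MaskGen for a 4-byte beat; the address/size-to-mask mapping follows the
// examples in the comment above. Port names are hypothetical.
class MaskGenExample extends Module {
  val io = IO(new Bundle {
    val addr   = Input(UInt(2.W))  // low address bits within the beat
    val lgSize = Input(UInt(2.W))  // log2 of the access size in bytes
    val mask   = Output(UInt(4.W)) // one bit per byte of the beat
  })
  io.mask := MaskGen(io.addr, io.lgSize, beatBytes = 4)
}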
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
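// A minimal usage sketch (not part of the original PlusArg.scala): reading a runtime
// +verbose=<n> plusarg and killing simulation when a hypothetical busy counter
// exceeds +example_timeout=<n>. All names here are made up for illustration.
class PlusArgExample extends Module {
  val io = IO(new Bundle { val busy = Input(Bool()) })
  val verbose = PlusArg("verbose", default = 0, docstring = "Verbosity level")
  when (verbose =/= 0.U && io.busy) { printf("busy\n") }
  val cycles = RegInit(0.U(32.W))
  cycles := Mux(io.busy, cycles + 1.U, 0.U)
  PlusArg.timeout("example_timeout", default = 0, docstring = "Max consecutive busy cycles")(cycles)
}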
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
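// A minimal usage sketch (not part of the original package.scala): isOneOf gives a
// compact membership test, here decoding two hypothetical opcode values.
class IsOneOfExample extends Module {
  val io = IO(new Bundle {
    val opcode = Input(UInt(7.W))
    val hit    = Output(Bool())
  })
  io.hit := io.opcode.isOneOf(0x03.U, 0x23.U)
}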
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
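// ---------------------------------------------------------------------------
// Illustrative sketch only -- not part of the original package.scala. A
// throwaway module exercising a few of the helpers defined above; the module
// name, port names, and widths are invented for the example.
// Elaboration-time note: groupByIntoSeq preserves first-appearance order,
// e.g. groupByIntoSeq(Seq(3, 1, 4, 1, 5))(_ % 2) == Seq(1 -> Seq(3, 1, 1, 5), 0 -> Seq(4))
// ---------------------------------------------------------------------------
class UtilHelpersSketch extends Module {
  import freechips.rocketchip.util._
  val io = IO(new Bundle {
    val sel  = Input(UInt(2.W))
    val a    = Input(UInt(3.W))
    val b    = Input(UInt(3.W))
    val bits = Input(UInt(8.W))
    val hit  = Output(Bool())
    val sum  = Output(UInt(3.W))
    val fill = Output(UInt(8.W))
  })
  io.hit  := io.sel.isOneOf(1.U, 2.U)  // UIntIsOneOf membership test
  io.sum  := io.a.addWrap(io.b, 5)     // (a + b) % 5, assuming a < 5 and b < 5
  io.fill := leftOR(io.bits)           // e.g. b00010100 becomes b11111100
}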
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
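// ---------------------------------------------------------------------------
// Illustrative sketch only -- not part of the original Parameters.scala.
// IdRange is half-open, so the values below follow directly from the
// definitions above; the object name is invented for the example.
// ---------------------------------------------------------------------------
object IdRangeSketch extends App {
  val a = IdRange(0, 4)        // ids 0, 1, 2, 3
  val b = IdRange(4, 8)        // ids 4, 5, 6, 7
  println(a.overlaps(b))       // false: touching half-open ranges do not overlap
  println(a.contains(3))       // true
  println(a.contains(4))       // false: 4 belongs to b, not a
  println(a.shift(4) == b)     // true: shifting [0, 4) by 4 gives [4, 8)
}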
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
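// ---------------------------------------------------------------------------
// Illustrative sketch only -- not part of the original file. It demonstrates
// the warning above that mincover is not a union: the cover can admit sizes
// that neither operand supports. The object name is invented for the example.
// ---------------------------------------------------------------------------
object TransferSizesSketch extends App {
  val small = TransferSizes(4, 8)
  val large = TransferSizes(32, 64)
  val cover = small.mincover(large)   // TransferSizes[4, 64]
  println(cover.contains(16))         // true, even though neither small nor large contains 16
  println(small.intersect(large))     // TransferSizes[0, 0], i.e. the empty (none) range
}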
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask marks the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
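// ---------------------------------------------------------------------------
// Illustrative sketch only -- not part of the original file. It works through
// the base/mask examples from the comments above AddressSet; the object name
// is invented for the example.
// ---------------------------------------------------------------------------
object AddressSetSketch extends App {
  // base=0x1000, mask=0xf0f covers 0x1000-0x100f, 0x1100-0x110f, ...
  val strided = AddressSet(0x1000, 0xf0f)
  println(strided.contains(BigInt(0x1104)))  // true: inside the second 16-byte window
  println(strided.contains(BigInt(0x1010)))  // false: in the hole between windows
  println(strided.alignment)                 // 16: set by the low run of mask bits
  // misaligned() decomposes an arbitrary [base, base+size) span into legal sets:
  // List(AddressSet(0x3000, 0xfff), AddressSet(0x4000, 0x7ff))
  println(AddressSet.misaligned(0x3000, 0x1800))
}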
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
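// ---------------------------------------------------------------------------
// Illustrative sketch only -- not part of the original file. BufferParams
// either wraps a Decoupled channel in a Queue or passes it through; the module
// name, port names, and payload width are invented for the example.
// ---------------------------------------------------------------------------
class BufferParamsSketch extends Module {
  val io = IO(new Bundle {
    val in  = Flipped(DecoupledIO(UInt(8.W)))
    val out = DecoupledIO(UInt(8.W))
  })
  io.out <> BufferParams.default(io.in)  // depth-2 Queue; BufferParams.none would be a wire-through
}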
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
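// ---------------------------------------------------------------------------
// Illustrative sketch only -- not part of the original Edges.scala. It pulls
// the beat-counting pattern out of firstlastHelper above into a standalone
// module so the first/last/done handshake is easier to see in isolation; the
// module name and the 4-bit counter width are invented for the example.
// ---------------------------------------------------------------------------
class FirstLastSketch extends Module {
  val io = IO(new Bundle {
    val fire   = Input(Bool())
    val beats1 = Input(UInt(4.W))   // number of beats minus one, as numBeats1 returns
    val first  = Output(Bool())
    val last   = Output(Bool())
    val done   = Output(Bool())
  })
  val counter = RegInit(0.U(4.W))
  io.first := counter === 0.U
  io.last  := counter === 1.U || io.beats1 === 0.U
  io.done  := io.last && io.fire
  when (io.fire) {
    // load beats1 on the first beat of a message, then count down to zero
    counter := Mux(io.first, io.beats1, counter - 1.U)
  }
}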
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_14( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [5:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [28:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [5:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire [26:0] _GEN = {23'h0, io_in_a_bits_size}; // @[package.scala:243:71]
wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
reg [8:0] a_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [3:0] size; // @[Monitor.scala:389:22]
reg [5:0] source; // @[Monitor.scala:390:22]
reg [28:0] address; // @[Monitor.scala:391:22]
reg [8:0] d_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [3:0] size_1; // @[Monitor.scala:540:22]
reg [5:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [34:0] inflight; // @[Monitor.scala:614:27]
reg [139:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [279:0] inflight_sizes; // @[Monitor.scala:618:33]
reg [8:0] a_first_counter_1; // @[Edges.scala:229:27]
wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
reg [8:0] d_first_counter_1; // @[Edges.scala:229:27]
wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire [63:0] _GEN_0 = {58'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35]
wire _GEN_1 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35]
wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46]
wire _GEN_2 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74]
wire [63:0] _GEN_3 = {58'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
reg [34:0] inflight_1; // @[Monitor.scala:726:35]
reg [279:0] inflight_sizes_1; // @[Monitor.scala:728:35]
reg [8:0] d_first_counter_2; // @[Edges.scala:229:27]
wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File IngressUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
class IngressUnit(
ingressNodeId: Int,
cParam: IngressChannelParams,
outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean,
combineSAST: Boolean,
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
class IngressUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(Decoupled(new IngressFlit(cParam.payloadBits)))
}
val io = IO(new IngressUnitIO)
val route_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2))
val route_q = Module(new Queue(new RouteComputerResp(outParams, egressParams), 2,
flow=combineRCVA))
assert(!(io.in.valid && !cParam.possibleFlows.toSeq.map(_.egressId.U === io.in.bits.egress_id).orR))
route_buffer.io.enq.bits.head := io.in.bits.head
route_buffer.io.enq.bits.tail := io.in.bits.tail
val flows = cParam.possibleFlows.toSeq
if (flows.size == 0) {
route_buffer.io.enq.bits.flow := DontCare
} else {
route_buffer.io.enq.bits.flow.ingress_node := cParam.destId.U
route_buffer.io.enq.bits.flow.ingress_node_id := ingressNodeId.U
route_buffer.io.enq.bits.flow.vnet_id := cParam.vNetId.U
route_buffer.io.enq.bits.flow.egress_node := Mux1H(
flows.map(_.egressId.U === io.in.bits.egress_id),
flows.map(_.egressNode.U)
)
route_buffer.io.enq.bits.flow.egress_node_id := Mux1H(
flows.map(_.egressId.U === io.in.bits.egress_id),
flows.map(_.egressNodeId.U)
)
}
route_buffer.io.enq.bits.payload := io.in.bits.payload
route_buffer.io.enq.bits.virt_channel_id := DontCare
io.router_req.bits.src_virt_id := 0.U
io.router_req.bits.flow := route_buffer.io.enq.bits.flow
val at_dest = route_buffer.io.enq.bits.flow.egress_node === nodeId.U
route_buffer.io.enq.valid := io.in.valid && (
io.router_req.ready || !io.in.bits.head || at_dest)
io.router_req.valid := io.in.valid && route_buffer.io.enq.ready && io.in.bits.head && !at_dest
io.in.ready := route_buffer.io.enq.ready && (
io.router_req.ready || !io.in.bits.head || at_dest)
route_q.io.enq.valid := io.router_req.fire
route_q.io.enq.bits := io.router_resp
when (io.in.fire && io.in.bits.head && at_dest) {
route_q.io.enq.valid := true.B
route_q.io.enq.bits.vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (egressParams(o).egressId.U === io.in.bits.egress_id) {
route_q.io.enq.bits.vc_sel(o+nOutputs)(0) := true.B
}
}
}
assert(!(route_q.io.enq.valid && !route_q.io.enq.ready))
val vcalloc_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2))
val vcalloc_q = Module(new Queue(new VCAllocResp(outParams, egressParams),
1, pipe=true))
vcalloc_buffer.io.enq.bits := route_buffer.io.deq.bits
io.vcalloc_req.bits.vc_sel := route_q.io.deq.bits.vc_sel
io.vcalloc_req.bits.flow := route_buffer.io.deq.bits.flow
io.vcalloc_req.bits.in_vc := 0.U
val head = route_buffer.io.deq.bits.head
val tail = route_buffer.io.deq.bits.tail
vcalloc_buffer.io.enq.valid := (route_buffer.io.deq.valid &&
(route_q.io.deq.valid || !head) &&
(io.vcalloc_req.ready || !head)
)
io.vcalloc_req.valid := (route_buffer.io.deq.valid && route_q.io.deq.valid &&
head && vcalloc_buffer.io.enq.ready && vcalloc_q.io.enq.ready)
route_buffer.io.deq.ready := (vcalloc_buffer.io.enq.ready &&
(route_q.io.deq.valid || !head) &&
(io.vcalloc_req.ready || !head) &&
(vcalloc_q.io.enq.ready || !head))
route_q.io.deq.ready := (route_buffer.io.deq.fire && tail)
vcalloc_q.io.enq.valid := io.vcalloc_req.fire
vcalloc_q.io.enq.bits := io.vcalloc_resp
assert(!(vcalloc_q.io.enq.valid && !vcalloc_q.io.enq.ready))
io.salloc_req(0).bits.vc_sel := vcalloc_q.io.deq.bits.vc_sel
io.salloc_req(0).bits.tail := vcalloc_buffer.io.deq.bits.tail
val c = (vcalloc_q.io.deq.bits.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
val vcalloc_tail = vcalloc_buffer.io.deq.bits.tail
io.salloc_req(0).valid := vcalloc_buffer.io.deq.valid && vcalloc_q.io.deq.valid && c && !io.block
vcalloc_buffer.io.deq.ready := io.salloc_req(0).ready && vcalloc_q.io.deq.valid && c && !io.block
vcalloc_q.io.deq.ready := vcalloc_tail && vcalloc_buffer.io.deq.fire
val out_bundle = if (combineSAST) {
Wire(Valid(new SwitchBundle(outParams, egressParams)))
} else {
Reg(Valid(new SwitchBundle(outParams, egressParams)))
}
io.out(0) := out_bundle
out_bundle.valid := vcalloc_buffer.io.deq.fire
out_bundle.bits.flit := vcalloc_buffer.io.deq.bits
out_bundle.bits.flit.virt_channel_id := 0.U
val out_channel_oh = vcalloc_q.io.deq.bits.vc_sel.map(_.reduce(_||_)).toSeq
out_bundle.bits.out_virt_channel := Mux1H(out_channel_oh,
vcalloc_q.io.deq.bits.vc_sel.map(v => OHToUInt(v)).toSeq)
io.debug.va_stall := io.vcalloc_req.valid && !io.vcalloc_req.ready
io.debug.sa_stall := io.salloc_req(0).valid && !io.salloc_req(0).ready
// TODO: We should not generate input/ingress/output/egress units for untraversable channels
if (!cParam.traversable) {
io.in.ready := false.B
io.router_req.valid := false.B
io.router_req.bits := DontCare
io.vcalloc_req.valid := false.B
io.vcalloc_req.bits := DontCare
io.salloc_req.foreach(_.valid := false.B)
io.salloc_req.foreach(_.bits := DontCare)
io.out.foreach(_.valid := false.B)
io.out.foreach(_.bits := DontCare)
}
}
| module IngressUnit_38( // @[IngressUnit.scala:11:7]
input clock, // @[IngressUnit.scala:11:7]
input reset // @[IngressUnit.scala:11:7]
);
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
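// ---------------------------------------------------------------------------
// Illustrative sketch only -- not part of the original ShiftReg.scala: a
// 3-deep, reset-to-zero pipeline built with ShiftRegInit. The module name and
// width are invented; the stages come out named pipe_0 through pipe_2.
// ---------------------------------------------------------------------------
class ShiftRegInitSketch extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(8.W))
    val q = Output(UInt(8.W))
  })
  io.q := ShiftRegInit(io.d, n = 3, init = 0.U(8.W), name = Some("pipe"))
}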
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
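// Illustrative usage sketch (added for clarity; not part of the original source, and the module
// and signal names below are made up). AsyncResetShiftReg wraps the register chain in a dedicated
// module so backend flows can recognize and constrain it:
class AsyncResetShiftRegExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(Bool())
    val out = Output(Bool())
  })
  // Elaborates an AsyncResetShiftReg_w1_d3_i0 instance with the suggested name "example_pipe"
  io.out := AsyncResetShiftReg(io.in, 3, Some("example_pipe"))
}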
File Crossing.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.interrupts
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.util.{SynchronizerShiftReg, AsyncResetReg}
@deprecated("IntXing does not ensure interrupt source is glitch free. Use IntSyncSource and IntSyncSink", "rocket-chip 1.2")
class IntXing(sync: Int = 3)(implicit p: Parameters) extends LazyModule
{
val intnode = IntAdapterNode()
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(intnode.in zip intnode.out) foreach { case ((in, _), (out, _)) =>
out := SynchronizerShiftReg(in, sync)
}
}
}
object IntSyncCrossingSource
{
def apply(alreadyRegistered: Boolean = false)(implicit p: Parameters) =
{
val intsource = LazyModule(new IntSyncCrossingSource(alreadyRegistered))
intsource.node
}
}
class IntSyncCrossingSource(alreadyRegistered: Boolean = false)(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSourceNode(alreadyRegistered)
lazy val module = if (alreadyRegistered) (new ImplRegistered) else (new Impl)
class Impl extends LazyModuleImp(this) {
def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0)
override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.sync := AsyncResetReg(Cat(in.reverse)).asBools
}
}
class ImplRegistered extends LazyRawModuleImp(this) {
def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0)
override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}_Registered"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.sync := in
}
}
}
object IntSyncCrossingSink
{
@deprecated("IntSyncCrossingSink which used the `sync` parameter to determine crossing type is deprecated. Use IntSyncAsyncCrossingSink, IntSyncRationalCrossingSink, or IntSyncSyncCrossingSink instead for > 1, 1, and 0 sync values respectively", "rocket-chip 1.2")
def apply(sync: Int = 3)(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync))
intsink.node
}
}
class IntSyncAsyncCrossingSink(sync: Int = 3)(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(sync)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = s"IntSyncAsyncCrossingSink_n${node.out.size}x${node.out.head._1.size}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := SynchronizerShiftReg(in.sync, sync)
}
}
}
object IntSyncAsyncCrossingSink
{
def apply(sync: Int = 3)(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync))
intsink.node
}
}
class IntSyncSyncCrossingSink()(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(0)
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
def outSize = node.out.headOption.map(_._1.size).getOrElse(0)
override def desiredName = s"IntSyncSyncCrossingSink_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := in.sync
}
}
}
object IntSyncSyncCrossingSink
{
def apply()(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncSyncCrossingSink())
intsink.node
}
}
class IntSyncRationalCrossingSink()(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(1)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def outSize = node.out.headOption.map(_._1.size).getOrElse(0)
override def desiredName = s"IntSyncRationalCrossingSink_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := RegNext(in.sync)
}
}
}
object IntSyncRationalCrossingSink
{
def apply()(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncRationalCrossingSink())
intsink.node
}
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
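// Worked example (added for clarity; not part of the original source): the derived fields follow
// directly from the parameters above.
object AsyncQueueParamsExample {
  val deep = AsyncQueueParams() // depth = 8, sync = 3, safe = true, narrow = false
  require(deep.bits == 3 && deep.wires == 8) // log2Ceil(8) = 3; the whole 8-entry mem crosses over
  val single = AsyncQueueParams.singleton() // depth = 1, so narrow would buy nothing
  require(single.bits == 0 && single.wires == 1)
}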
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
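// Worked example (added for clarity; not part of the original source): Gray coding guarantees
// that exactly one bit changes per increment, so a pointer sampled in another clock domain is
// either the old or the new value, never a torn mixture. For a 2-bit counter, binary 0,1,2,3
// maps through b ^ (b >> 1) to Gray 0,1,3,2:
//   (0 until 4).map(b => b ^ (b >> 1)) // == Vector(0, 1, 3, 2)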
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
    // Impossible to write because a dequeue can occur on the receiving side,
    // then reset is allowed to happen, but the write side cannot know that the
    // dequeue occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
  // The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
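// Illustrative usage sketch (added for clarity; not part of the original source, and the module,
// clock and signal names below are made up). An AsyncQueue is handed an explicit clock/reset pair
// per side; here an 8-bit Decoupled stream is crossed from domain A to domain B:
class AsyncQueueExample extends RawModule {
  val clock_a = IO(Input(Clock()))
  val reset_a = IO(Input(Bool()))
  val clock_b = IO(Input(Clock()))
  val reset_b = IO(Input(Bool()))
  val enq = IO(Flipped(Decoupled(UInt(8.W))))
  val deq = IO(Decoupled(UInt(8.W)))
  val crossing = withClockAndReset(clock_a, reset_a) {
    Module(new AsyncQueue(UInt(8.W), AsyncQueueParams(depth = 4)))
  }
  crossing.io.enq_clock := clock_a
  crossing.io.enq_reset := reset_a
  crossing.io.deq_clock := clock_b
  crossing.io.deq_reset := reset_b
  crossing.io.enq <> enq
  deq <> crossing.io.deq
}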
File AsyncCrossing.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, NodeHandle}
import freechips.rocketchip.prci.{AsynchronousCrossing}
import freechips.rocketchip.subsystem.CrossingWrapper
import freechips.rocketchip.util.{AsyncQueueParams, ToAsyncBundle, FromAsyncBundle, Pow2ClockDivider, property}
class TLAsyncCrossingSource(sync: Option[Int])(implicit p: Parameters) extends LazyModule
{
def this(x: Int)(implicit p: Parameters) = this(Some(x))
def this()(implicit p: Parameters) = this(None)
val node = TLAsyncSourceNode(sync)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = (Seq("TLAsyncCrossingSource") ++ node.in.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
val bce = edgeIn.manager.anySupportAcquireB && edgeIn.client.anySupportProbe
val psync = sync.getOrElse(edgeOut.manager.async.sync)
val params = edgeOut.manager.async.copy(sync = psync)
out.a <> ToAsyncBundle(in.a, params)
in.d <> FromAsyncBundle(out.d, psync)
property.cover(in.a, "TL_ASYNC_CROSSING_SOURCE_A", "MemorySystem;;TLAsyncCrossingSource Channel A")
property.cover(in.d, "TL_ASYNC_CROSSING_SOURCE_D", "MemorySystem;;TLAsyncCrossingSource Channel D")
if (bce) {
in.b <> FromAsyncBundle(out.b, psync)
out.c <> ToAsyncBundle(in.c, params)
out.e <> ToAsyncBundle(in.e, params)
property.cover(in.b, "TL_ASYNC_CROSSING_SOURCE_B", "MemorySystem;;TLAsyncCrossingSource Channel B")
property.cover(in.c, "TL_ASYNC_CROSSING_SOURCE_C", "MemorySystem;;TLAsyncCrossingSource Channel C")
property.cover(in.e, "TL_ASYNC_CROSSING_SOURCE_E", "MemorySystem;;TLAsyncCrossingSource Channel E")
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ridx := 0.U
out.c.widx := 0.U
out.e.widx := 0.U
}
}
}
}
class TLAsyncCrossingSink(params: AsyncQueueParams = AsyncQueueParams())(implicit p: Parameters) extends LazyModule
{
val node = TLAsyncSinkNode(params)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = (Seq("TLAsyncCrossingSink") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
val bce = edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe
out.a <> FromAsyncBundle(in.a, params.sync)
in.d <> ToAsyncBundle(out.d, params)
property.cover(out.a, "TL_ASYNC_CROSSING_SINK_A", "MemorySystem;;TLAsyncCrossingSink Channel A")
property.cover(out.d, "TL_ASYNC_CROSSING_SINK_D", "MemorySystem;;TLAsyncCrossingSink Channel D")
if (bce) {
in.b <> ToAsyncBundle(out.b, params)
out.c <> FromAsyncBundle(in.c, params.sync)
out.e <> FromAsyncBundle(in.e, params.sync)
property.cover(out.b, "TL_ASYNC_CROSSING_SINK_B", "MemorySystem;;TLAsyncCrossingSinkChannel B")
property.cover(out.c, "TL_ASYNC_CROSSING_SINK_C", "MemorySystem;;TLAsyncCrossingSink Channel C")
property.cover(out.e, "TL_ASYNC_CROSSING_SINK_E", "MemorySystem;;TLAsyncCrossingSink Channel E")
} else {
in.b.widx := 0.U
in.c.ridx := 0.U
in.e.ridx := 0.U
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLAsyncCrossingSource
{
def apply()(implicit p: Parameters): TLAsyncSourceNode = apply(None)
def apply(sync: Int)(implicit p: Parameters): TLAsyncSourceNode = apply(Some(sync))
def apply(sync: Option[Int])(implicit p: Parameters): TLAsyncSourceNode =
{
val asource = LazyModule(new TLAsyncCrossingSource(sync))
asource.node
}
}
object TLAsyncCrossingSink
{
def apply(params: AsyncQueueParams = AsyncQueueParams())(implicit p: Parameters) =
{
val asink = LazyModule(new TLAsyncCrossingSink(params))
asink.node
}
}
@deprecated("TLAsyncCrossing is fragile. Use TLAsyncCrossingSource and TLAsyncCrossingSink", "rocket-chip 1.2")
class TLAsyncCrossing(params: AsyncQueueParams = AsyncQueueParams())(implicit p: Parameters) extends LazyModule
{
val source = LazyModule(new TLAsyncCrossingSource())
val sink = LazyModule(new TLAsyncCrossingSink(params))
val node = NodeHandle(source.node, sink.node)
sink.node := source.node
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
val io = IO(new Bundle {
val in_clock = Input(Clock())
val in_reset = Input(Bool())
val out_clock = Input(Clock())
val out_reset = Input(Bool())
})
source.module.clock := io.in_clock
source.module.reset := io.in_reset
sink.module.clock := io.out_clock
sink.module.reset := io.out_reset
}
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMAsyncCrossing(txns: Int, params: AsynchronousCrossing = AsynchronousCrossing())(implicit p: Parameters) extends LazyModule {
val model = LazyModule(new TLRAMModel("AsyncCrossing"))
val fuzz = LazyModule(new TLFuzzer(txns))
val island = LazyModule(new CrossingWrapper(params))
val ram = island { LazyModule(new TLRAM(AddressSet(0x0, 0x3ff))) }
island.crossTLIn(ram.node) := TLFragmenter(4, 256) := TLDelayer(0.1) := model.node := fuzz.node
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
// Shove the RAM into another clock domain
val clocks = Module(new Pow2ClockDivider(2))
island.module.clock := clocks.io.clock_out
}
}
class TLRAMAsyncCrossingTest(txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut_wide = Module(LazyModule(new TLRAMAsyncCrossing(txns)).module)
val dut_narrow = Module(LazyModule(new TLRAMAsyncCrossing(txns, AsynchronousCrossing(safe = false, narrow = true))).module)
io.finished := dut_wide.io.finished && dut_narrow.io.finished
dut_wide.io.start := io.start
dut_narrow.io.start := io.start
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning its [[AutoBundle]] and the unconnected [[Dangle]]s from this module
    * and its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
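/** Illustrative sketch (added for clarity; not part of the original source, and the class name and
  * constant below are made up). The typical pattern is an outer [[LazyModule]] carrying diplomatic
  * state and an inner Impl holding the hardware, which is only elaborated when `.module` is
  * accessed, e.g. via Module(LazyModule(new ExampleLazyModule).module).
  */
class ExampleLazyModule(implicit p: Parameters) extends LazyModule {
  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) {
    val io = IO(new Bundle { val out = Output(UInt(8.W)) })
    io.out := 42.U
  }
}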
File Debug.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.devices.debug
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.amba.apb.{APBFanout, APBToTL}
import freechips.rocketchip.devices.debug.systembusaccess.{SBToTL, SystemBusAccessModule}
import freechips.rocketchip.devices.tilelink.{DevNullParams, TLBusBypass, TLError}
import freechips.rocketchip.diplomacy.{AddressSet, BufferParams}
import freechips.rocketchip.resources.{Description, Device, Resource, ResourceBindings, ResourceString, SimpleDevice}
import freechips.rocketchip.interrupts.{IntNexusNode, IntSinkParameters, IntSinkPortParameters, IntSourceParameters, IntSourcePortParameters, IntSyncCrossingSource, IntSyncIdentityNode}
import freechips.rocketchip.regmapper.{RegField, RegFieldAccessType, RegFieldDesc, RegFieldGroup, RegFieldWrType, RegReadFn, RegWriteFn}
import freechips.rocketchip.rocket.{CSRs, Instructions}
import freechips.rocketchip.tile.MaxHartIdBits
import freechips.rocketchip.tilelink.{TLAsyncCrossingSink, TLAsyncCrossingSource, TLBuffer, TLRegisterNode, TLXbar}
import freechips.rocketchip.util.{Annotated, AsyncBundle, AsyncQueueParams, AsyncResetSynchronizerShiftReg, FromAsyncBundle, ParameterizedBundle, ResetSynchronizerShiftReg, ToAsyncBundle}
import freechips.rocketchip.util.SeqBoolBitwiseOps
import freechips.rocketchip.util.SeqToAugmentedSeq
import freechips.rocketchip.util.BooleanToAugmentedBoolean
object DsbBusConsts {
def sbAddrWidth = 12
def sbIdWidth = 10
}
object DsbRegAddrs{
// These are used by the ROM.
def HALTED = 0x100
def GOING = 0x104
def RESUMING = 0x108
def EXCEPTION = 0x10C
def WHERETO = 0x300
// This needs to be aligned for up to lq/sq
// This shows up in HartInfo, and needs to be aligned
// to enable up to LQ/SQ instructions.
def DATA = 0x380
// We want DATA to immediately follow PROGBUF so that we can
// use them interchangeably. Leave another slot if there is an
// implicit ebreak.
def PROGBUF(cfg:DebugModuleParams) = {
val tmp = DATA - (cfg.nProgramBufferWords * 4)
if (cfg.hasImplicitEbreak) (tmp - 4) else tmp
}
  // This is unused if hasImplicitEbreak is false, and just points to the end of the PROGBUF.
def IMPEBREAK(cfg: DebugModuleParams) = { DATA - 4 }
// We want abstract to be immediately before PROGBUF
// because we auto-generate 2 (or 5) instructions.
def ABSTRACT(cfg:DebugModuleParams) = PROGBUF(cfg) - (cfg.nAbstractInstructions * 4)
def FLAGS = 0x400
def ROMBASE = 0x800
}
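// Worked example (added for clarity; not part of the original source): with the default
// DebugModuleParams (baseAddress = 0, nProgramBufferWords = 16, hasImplicitEbreak = false,
// hence nAbstractInstructions = 2) the ROM-visible layout resolves to:
//   DATA     = 0x380
//   PROGBUF  = 0x380 - 16*4 = 0x340
//   ABSTRACT = 0x340 - 2*4  = 0x338
//   FLAGS    = 0x400, ROMBASE = 0x800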
/** Enumerations used both in the hardware
* and in the configuration specification.
*/
object DebugModuleAccessType extends scala.Enumeration {
type DebugModuleAccessType = Value
val Access8Bit, Access16Bit, Access32Bit, Access64Bit, Access128Bit = Value
}
object DebugAbstractCommandError extends scala.Enumeration {
type DebugAbstractCommandError = Value
val Success, ErrBusy, ErrNotSupported, ErrException, ErrHaltResume = Value
}
object DebugAbstractCommandType extends scala.Enumeration {
type DebugAbstractCommandType = Value
val AccessRegister, QuickAccess = Value
}
/** Parameters exposed to the top-level design, set based on
* external requirements, etc.
*
* This object checks that the parameters conform to the
* full specification. The implementation which receives this
* object can perform more checks on what that implementation
* actually supports.
* @param nComponents Number of components to support debugging.
  * @param baseAddress Base offset for debugEntry and debugException
* @param nDMIAddrSize Size of the Debug Bus Address
* @param nAbstractDataWords Number of 32-bit words for Abstract Commands
  * @param nProgramBufferWords Number of 32-bit words for Program Buffer
* @param hasBusMaster Whether or not a bus master should be included
* @param clockGate Whether or not to use dmactive as the clockgate for debug module
* @param maxSupportedSBAccess Maximum transaction size supported by System Bus Access logic.
* @param supportQuickAccess Whether or not to support the quick access command.
* @param supportHartArray Whether or not to implement the hart array register (if >1 hart).
* @param nHaltGroups Number of halt groups
* @param nExtTriggers Number of external triggers
* @param hasHartResets Feature to reset all the currently selected harts
* @param hasImplicitEbreak There is an additional RO program buffer word containing an ebreak
* @param crossingHasSafeReset Include "safe" logic in Async Crossings so that only one side needs to be reset.
*/
case class DebugModuleParams (
baseAddress : BigInt = BigInt(0),
nDMIAddrSize : Int = 7,
nProgramBufferWords: Int = 16,
nAbstractDataWords : Int = 4,
nScratch : Int = 1,
hasBusMaster : Boolean = false,
clockGate : Boolean = true,
maxSupportedSBAccess : Int = 32,
supportQuickAccess : Boolean = false,
supportHartArray : Boolean = true,
nHaltGroups : Int = 1,
nExtTriggers : Int = 0,
hasHartResets : Boolean = false,
hasImplicitEbreak : Boolean = false,
hasAuthentication : Boolean = false,
crossingHasSafeReset : Boolean = true
) {
require ((nDMIAddrSize >= 7) && (nDMIAddrSize <= 32), s"Legal DMIAddrSize is 7-32, not ${nDMIAddrSize}")
  require ((nAbstractDataWords > 0) && (nAbstractDataWords <= 16), s"Legal nAbstractDataWords is 1-16, not ${nAbstractDataWords}")
require ((nProgramBufferWords >= 0) && (nProgramBufferWords <= 16), s"Legal nProgramBufferWords is 0-16, not ${nProgramBufferWords}")
require (nHaltGroups < 32, s"Legal nHaltGroups is 0-31, not ${nHaltGroups}")
require (nExtTriggers <= 16, s"Legal nExtTriggers is 0-16, not ${nExtTriggers}")
if (supportQuickAccess) {
// TODO: Check that quick access requirements are met.
}
def address = AddressSet(baseAddress, 0xFFF)
/** the base address of DM */
def atzero = (baseAddress == 0)
/** The number of generated instructions
*
    * When the base address is not zero, we also need more instructions (and
    * more dscratch registers) to load/store the memory-mapped data registers,
    * because they may no longer be directly addressable with x0 + a 12-bit imm
*/
def nAbstractInstructions = if (atzero) 2 else 5
def debugEntry: BigInt = baseAddress + 0x800
def debugException: BigInt = baseAddress + 0x808
def nDscratch: Int = if (atzero) 1 else 2
}
object DefaultDebugModuleParams {
def apply(xlen:Int /*TODO , val configStringAddr: Int*/): DebugModuleParams = {
new DebugModuleParams().copy(
nAbstractDataWords = (if (xlen == 32) 1 else if (xlen == 64) 2 else 4),
maxSupportedSBAccess = xlen
)
}
}
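// Worked example (added for clarity; not part of the original source): DefaultDebugModuleParams(64)
// keeps the defaults above but sets nAbstractDataWords = 2 and maxSupportedSBAccess = 64; because
// baseAddress = 0, the derived values are nAbstractInstructions = 2, nDscratch = 1,
// debugEntry = 0x800 and debugException = 0x808.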
case object DebugModuleKey extends Field[Option[DebugModuleParams]](Some(DebugModuleParams()))
/** Functional parameters exposed to the design configuration.
*
* hartIdToHartSel: For systems where hart ids are not 1:1 with hartsel, provide the mapping.
* hartSelToHartId: Provide inverse mapping of the above
*/
case class DebugModuleHartSelFuncs (
hartIdToHartSel : (UInt) => UInt = (x:UInt) => x,
hartSelToHartId : (UInt) => UInt = (x:UInt) => x
)
case object DebugModuleHartSelKey extends Field(DebugModuleHartSelFuncs())
class DebugExtTriggerOut (val nExtTriggers: Int) extends Bundle {
val req = Output(UInt(nExtTriggers.W))
val ack = Input(UInt(nExtTriggers.W))
}
class DebugExtTriggerIn (val nExtTriggers: Int) extends Bundle {
val req = Input(UInt(nExtTriggers.W))
val ack = Output(UInt(nExtTriggers.W))
}
class DebugExtTriggerIO () (implicit val p: Parameters) extends ParameterizedBundle()(p) {
val out = new DebugExtTriggerOut(p(DebugModuleKey).get.nExtTriggers)
val in = new DebugExtTriggerIn (p(DebugModuleKey).get.nExtTriggers)
}
class DebugAuthenticationIO () (implicit val p: Parameters) extends ParameterizedBundle()(p) {
val dmactive = Output(Bool())
val dmAuthWrite = Output(Bool())
val dmAuthRead = Output(Bool())
val dmAuthWdata = Output(UInt(32.W))
val dmAuthBusy = Input(Bool())
val dmAuthRdata = Input(UInt(32.W))
val dmAuthenticated = Input(Bool())
}
// *****************************************
// Module Interfaces
//
// *****************************************
/** Control signals for Inner, generated in Outer
* {{{
  * run control: resumereq, ackhavereset, halt-on-reset mask
* hart select: hasel, hartsel and the hart array mask
* }}}
*/
class DebugInternalBundle (val nComponents: Int)(implicit val p: Parameters) extends ParameterizedBundle()(p) {
/** resume request */
val resumereq = Bool()
/** hart select */
val hartsel = UInt(10.W)
/** reset acknowledge */
val ackhavereset = Bool()
/** hart array enable */
val hasel = Bool()
/** hart array mask */
val hamask = Vec(nComponents, Bool())
/** halt-on-reset mask */
val hrmask = Vec(nComponents, Bool())
}
/** structure for top-level Debug Module signals which aren't the bus interfaces. */
class DebugCtrlBundle (nComponents: Int)(implicit val p: Parameters) extends ParameterizedBundle()(p) {
/** debug availability status for all harts */
val debugUnavail = Input(Vec(nComponents, Bool()))
/** reset signal
*
* for every part of the hardware platform,
* including every hart, except for the DM and any
* logic required to access the DM
*/
val ndreset = Output(Bool())
/** reset signal for the DM itself */
val dmactive = Output(Bool())
  /** dmactive acknowledge */
val dmactiveAck = Input(Bool())
}
// *****************************************
// Debug Module
//
// *****************************************
/** Parameterized version of the Debug Module defined in the
* RISC-V Debug Specification
*
* DebugModule is a slave to two asynchronous masters:
* The Debug Bus (DMI) -- This is driven by an external debugger
*
* The System Bus -- This services requests from the cores. Generally
* this interface should only be active at the request
* of the debugger, but the Debug Module may also
* provide the default MTVEC since it is mapped
* to address 0x0.
*
* DebugModule is responsible for control registers and RAM, and
* Debug ROM. It runs partially off of the dmiClk (e.g. TCK) and
* the TL clock. Therefore, it is divided into "Outer" portion (running
* off dmiClock and dmiReset) and "Inner" (running off tl_clock and tl_reset).
* This allows DMCONTROL.haltreq, hartsel, hasel, hawindowsel, hawindow, dmactive,
* and ndreset to be modified even while the Core is in reset or not being clocked.
* Not all reads from the Debugger to the Debug Module will actually complete
* in these scenarios either, they will just block until tl_clock and tl_reset
* allow them to complete. This is not strictly necessary for
* proper debugger functionality.
*/
// Local reg mapper function : Notify when written, but give the value as well.
object WNotifyWire {
def apply(n: Int, value: UInt, set: Bool, name: String, desc: String) : RegField = {
RegField(n, 0.U, RegWriteFn((valid, data) => {
set := valid
value := data
true.B
}), Some(RegFieldDesc(name = name, desc = desc,
access = RegFieldAccessType.W)))
}
}
// Local reg mapper function : Notify when accessed either as read or write.
object RWNotify {
def apply (n: Int, rVal: UInt, wVal: UInt, rNotify: Bool, wNotify: Bool, desc: Option[RegFieldDesc] = None): RegField = {
RegField(n,
RegReadFn ((ready) => {rNotify := ready ; (true.B, rVal)}),
RegWriteFn((valid, data) => {
wNotify := valid
when (valid) {wVal := data}
true.B
}
), desc)
}
}
// Local reg mapper function : Notify with value when written, take read input as presented.
// This allows checking or correcting the write value before storing it in the register field.
object WNotifyVal {
def apply(n: Int, rVal: UInt, wVal: UInt, wNotify: Bool, desc: RegFieldDesc): RegField = {
RegField(n, rVal, RegWriteFn((valid, data) => {
wNotify := valid
wVal := data
true.B
}
), desc)
}
}
class TLDebugModuleOuter(device: Device)(implicit p: Parameters) extends LazyModule {
// For Shorter Register Names
import DMI_RegAddrs._
val cfg = p(DebugModuleKey).get
val intnode = IntNexusNode(
sourceFn = { _ => IntSourcePortParameters(Seq(IntSourceParameters(1, Seq(Resource(device, "int"))))) },
sinkFn = { _ => IntSinkPortParameters(Seq(IntSinkParameters())) },
outputRequiresInput = false)
val dmiNode = TLRegisterNode (
address = AddressSet.misaligned(DMI_DMCONTROL << 2, 4) ++
AddressSet.misaligned(DMI_HARTINFO << 2, 4) ++
AddressSet.misaligned(DMI_HAWINDOWSEL << 2, 4) ++
AddressSet.misaligned(DMI_HAWINDOW << 2, 4),
device = device,
beatBytes = 4,
executable = false
)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
require (intnode.edges.in.size == 0, "Debug Module does not accept interrupts")
val nComponents = intnode.out.size
def getNComponents = () => nComponents
val supportHartArray = cfg.supportHartArray && (nComponents > 1) // no hart array if only one hart
val io = IO(new Bundle {
/** structure for top-level Debug Module signals which aren't the bus interfaces. */
val ctrl = (new DebugCtrlBundle(nComponents))
/** control signals for Inner, generated in Outer */
val innerCtrl = new DecoupledIO(new DebugInternalBundle(nComponents))
/** debug interruption from Inner to Outer
*
        * contains 2 types of debug interruption causes:
* - halt group
* - halt-on-reset
*/
val hgDebugInt = Input(Vec(nComponents, Bool()))
/** hart reset request to core */
val hartResetReq = cfg.hasHartResets.option(Output(Vec(nComponents, Bool())))
/** authentication support */
val dmAuthenticated = cfg.hasAuthentication.option(Input(Bool()))
})
val omRegMap = withReset(reset.asAsyncReset) {
// FIXME: Instead of casting reset to ensure it is Async, assert/require reset.Type == AsyncReset (when this feature is available)
val dmAuthenticated = io.dmAuthenticated.map( dma =>
ResetSynchronizerShiftReg(in=dma, sync=3, name=Some("dmAuthenticated_sync"))).getOrElse(true.B)
//----DMCONTROL (The whole point of 'Outer' is to maintain this register on dmiClock (e.g. TCK) domain, so that it
// can be written even if 'Inner' is not being clocked or is in reset. This allows halting
// harts while the rest of the system is in reset. It doesn't really allow any other
// register accesses, which will keep returning 'busy' to the debugger interface.
val DMCONTROLReset = WireInit(0.U.asTypeOf(new DMCONTROLFields()))
val DMCONTROLNxt = WireInit(0.U.asTypeOf(new DMCONTROLFields()))
val DMCONTROLReg = RegNext(next=DMCONTROLNxt, init=0.U.asTypeOf(DMCONTROLNxt)).suggestName("DMCONTROLReg")
val hartsel_mask = if (nComponents > 1) ((1 << p(MaxHartIdBits)) - 1).U else 0.U
val DMCONTROLWrData = WireInit(0.U.asTypeOf(new DMCONTROLFields()))
val dmactiveWrEn = WireInit(false.B)
val ndmresetWrEn = WireInit(false.B)
val clrresethaltreqWrEn = WireInit(false.B)
val setresethaltreqWrEn = WireInit(false.B)
val hartselloWrEn = WireInit(false.B)
val haselWrEn = WireInit(false.B)
val ackhaveresetWrEn = WireInit(false.B)
val hartresetWrEn = WireInit(false.B)
val resumereqWrEn = WireInit(false.B)
val haltreqWrEn = WireInit(false.B)
val dmactive = DMCONTROLReg.dmactive
DMCONTROLNxt := DMCONTROLReg
when (~dmactive) {
DMCONTROLNxt := DMCONTROLReset
} .otherwise {
when (dmAuthenticated && ndmresetWrEn) { DMCONTROLNxt.ndmreset := DMCONTROLWrData.ndmreset }
when (dmAuthenticated && hartselloWrEn) { DMCONTROLNxt.hartsello := DMCONTROLWrData.hartsello & hartsel_mask}
when (dmAuthenticated && haselWrEn) { DMCONTROLNxt.hasel := DMCONTROLWrData.hasel }
when (dmAuthenticated && hartresetWrEn) { DMCONTROLNxt.hartreset := DMCONTROLWrData.hartreset }
when (dmAuthenticated && haltreqWrEn) { DMCONTROLNxt.haltreq := DMCONTROLWrData.haltreq }
}
// Put this last to override its own effects.
when (dmactiveWrEn) {
DMCONTROLNxt.dmactive := DMCONTROLWrData.dmactive
}
//----HARTINFO
// DATA registers are mapped to memory. The dataaddr field of HARTINFO has only
// 12 bits and assumes the DM base is 0. If not at 0, then HARTINFO reads as 0
// (implying nonexistence according to the Debug Spec).
val HARTINFORdData = WireInit(0.U.asTypeOf(new HARTINFOFields()))
if (cfg.atzero) when (dmAuthenticated) {
HARTINFORdData.dataaccess := true.B
HARTINFORdData.datasize := cfg.nAbstractDataWords.U
HARTINFORdData.dataaddr := DsbRegAddrs.DATA.U
HARTINFORdData.nscratch := cfg.nScratch.U
}
//--------------------------------------------------------------
// Hart array mask and window
      // hamask is the hart array mask (1 bit per component), which doesn't include the hart selected by dmcontrol.hartsello
// HAWINDOWSEL selects a 32-bit slice of HAMASK to be visible for read/write in HAWINDOW
//--------------------------------------------------------------
val hamask = WireInit(VecInit(Seq.fill(nComponents) {false.B} ))
def haWindowSize = 32
// The following need to be declared even if supportHartArray is false due to reference
// at compile time by dmiNode.regmap
val HAWINDOWSELWrData = WireInit(0.U.asTypeOf(new HAWINDOWSELFields()))
val HAWINDOWSELWrEn = WireInit(false.B)
val HAWINDOWRdData = WireInit(0.U.asTypeOf(new HAWINDOWFields()))
val HAWINDOWWrData = WireInit(0.U.asTypeOf(new HAWINDOWFields()))
val HAWINDOWWrEn = WireInit(false.B)
/** whether the hart is selected */
def hartSelected(hart: Int): Bool = {
((io.innerCtrl.bits.hartsel === hart.U) ||
(if (supportHartArray) io.innerCtrl.bits.hasel && io.innerCtrl.bits.hamask(hart) else false.B))
}
val HAWINDOWSELNxt = WireInit(0.U.asTypeOf(new HAWINDOWSELFields()))
val HAWINDOWSELReg = RegNext(next=HAWINDOWSELNxt, init=0.U.asTypeOf(HAWINDOWSELNxt))
if (supportHartArray) {
val HAWINDOWSELReset = WireInit(0.U.asTypeOf(new HAWINDOWSELFields()))
HAWINDOWSELNxt := HAWINDOWSELReg
when (~dmactive || ~dmAuthenticated) {
HAWINDOWSELNxt := HAWINDOWSELReset
} .otherwise {
when (HAWINDOWSELWrEn) {
// Unneeded upper bits of HAWINDOWSEL are tied to 0. Entire register is 0 if all harts fit in one window
if (nComponents > haWindowSize) {
HAWINDOWSELNxt.hawindowsel := HAWINDOWSELWrData.hawindowsel & ((1 << (log2Up(nComponents) - 5)) - 1).U
} else {
HAWINDOWSELNxt.hawindowsel := 0.U
}
}
}
val numHAMASKSlices = ((nComponents - 1)/haWindowSize)+1
HAWINDOWRdData.maskdata := 0.U // default, overridden below
        // for each slice, use a HAMASKReg to store the selection info
for (ii <- 0 until numHAMASKSlices) {
val sliceMask = if (nComponents > ((ii*haWindowSize) + haWindowSize-1)) (BigInt(1) << haWindowSize) - 1 // All harts in this slice exist
else (BigInt(1)<<(nComponents - (ii*haWindowSize))) - 1 // Partial last slice
val HAMASKRst = WireInit(0.U.asTypeOf(new HAWINDOWFields()))
val HAMASKNxt = WireInit(0.U.asTypeOf(new HAWINDOWFields()))
val HAMASKReg = RegNext(next=HAMASKNxt, init=0.U.asTypeOf(HAMASKNxt))
when (ii.U === HAWINDOWSELReg.hawindowsel) {
HAWINDOWRdData.maskdata := HAMASKReg.asUInt & sliceMask.U
}
HAMASKNxt.maskdata := HAMASKReg.asUInt
when (~dmactive || ~dmAuthenticated) {
HAMASKNxt := HAMASKRst
}.otherwise {
when (HAWINDOWWrEn && (ii.U === HAWINDOWSELReg.hawindowsel)) {
HAMASKNxt.maskdata := HAWINDOWWrData.maskdata
}
}
// drive each slice of hamask with stored HAMASKReg or with new value being written
for (jj <- 0 until haWindowSize) {
if (((ii*haWindowSize) + jj) < nComponents) {
val tempWrData = HAWINDOWWrData.maskdata.asBools
val tempMaskReg = HAMASKReg.asUInt.asBools
when (HAWINDOWWrEn && (ii.U === HAWINDOWSELReg.hawindowsel)) {
hamask(ii*haWindowSize + jj) := tempWrData(jj)
}.otherwise {
hamask(ii*haWindowSize + jj) := tempMaskReg(jj)
}
}
}
}
}
//--------------------------------------------------------------
// Halt-on-reset
// hrmaskReg is current set of harts that should halt-on-reset
// Reset state (dmactive=0) is all zeroes
// Bits are set by writing 1 to DMCONTROL.setresethaltreq
// Bits are cleared by writing 1 to DMCONTROL.clrresethaltreq
// Spec says if both are 1, then clrresethaltreq is executed
// hrmask is the halt-on-reset mask which will be sent to inner
//--------------------------------------------------------------
val hrmask = Wire(Vec(nComponents, Bool()))
val hrmaskNxt = Wire(Vec(nComponents, Bool()))
val hrmaskReg = RegNext(next=hrmaskNxt, init=0.U.asTypeOf(hrmaskNxt)).suggestName("hrmaskReg")
hrmaskNxt := hrmaskReg
for (component <- 0 until nComponents) {
when (~dmactive || ~dmAuthenticated) {
hrmaskNxt(component) := false.B
}.elsewhen (clrresethaltreqWrEn && DMCONTROLWrData.clrresethaltreq && hartSelected(component)) {
hrmaskNxt(component) := false.B
}.elsewhen (setresethaltreqWrEn && DMCONTROLWrData.setresethaltreq && hartSelected(component)) {
hrmaskNxt(component) := true.B
}
}
hrmask := hrmaskNxt
val dmControlRegFields = RegFieldGroup("dmcontrol", Some("debug module control register"), Seq(
WNotifyVal(1, DMCONTROLReg.dmactive & io.ctrl.dmactiveAck, DMCONTROLWrData.dmactive, dmactiveWrEn,
RegFieldDesc("dmactive", "debug module active", reset=Some(0))),
WNotifyVal(1, DMCONTROLReg.ndmreset, DMCONTROLWrData.ndmreset, ndmresetWrEn,
RegFieldDesc("ndmreset", "debug module reset output", reset=Some(0))),
WNotifyVal(1, 0.U, DMCONTROLWrData.clrresethaltreq, clrresethaltreqWrEn,
RegFieldDesc("clrresethaltreq", "clear reset halt request", reset=Some(0), access=RegFieldAccessType.W)),
WNotifyVal(1, 0.U, DMCONTROLWrData.setresethaltreq, setresethaltreqWrEn,
RegFieldDesc("setresethaltreq", "set reset halt request", reset=Some(0), access=RegFieldAccessType.W)),
RegField(12),
if (nComponents > 1) WNotifyVal(p(MaxHartIdBits),
DMCONTROLReg.hartsello, DMCONTROLWrData.hartsello, hartselloWrEn,
RegFieldDesc("hartsello", "hart select low", reset=Some(0)))
else RegField(1),
if (nComponents > 1) RegField(10-p(MaxHartIdBits))
else RegField(9),
if (supportHartArray)
WNotifyVal(1, DMCONTROLReg.hasel, DMCONTROLWrData.hasel, haselWrEn,
RegFieldDesc("hasel", "hart array select", reset=Some(0)))
else RegField(1),
RegField(1),
WNotifyVal(1, 0.U, DMCONTROLWrData.ackhavereset, ackhaveresetWrEn,
RegFieldDesc("ackhavereset", "acknowledge reset", reset=Some(0), access=RegFieldAccessType.W)),
if (cfg.hasHartResets)
WNotifyVal(1, DMCONTROLReg.hartreset, DMCONTROLWrData.hartreset, hartresetWrEn,
RegFieldDesc("hartreset", "hart reset request", reset=Some(0)))
else RegField(1),
WNotifyVal(1, 0.U, DMCONTROLWrData.resumereq, resumereqWrEn,
RegFieldDesc("resumereq", "resume request", reset=Some(0), access=RegFieldAccessType.W)),
WNotifyVal(1, DMCONTROLReg.haltreq, DMCONTROLWrData.haltreq, haltreqWrEn, // Spec says W, but maintaining previous behavior
RegFieldDesc("haltreq", "halt request", reset=Some(0)))
))
val hartinfoRegFields = RegFieldGroup("dmi_hartinfo", Some("hart information"), Seq(
RegField.r(12, HARTINFORdData.dataaddr, RegFieldDesc("dataaddr", "data address", reset=Some(if (cfg.atzero) DsbRegAddrs.DATA else 0))),
RegField.r(4, HARTINFORdData.datasize, RegFieldDesc("datasize", "number of DATA registers", reset=Some(if (cfg.atzero) cfg.nAbstractDataWords else 0))),
RegField.r(1, HARTINFORdData.dataaccess, RegFieldDesc("dataaccess", "data access type", reset=Some(if (cfg.atzero) 1 else 0))),
RegField(3),
RegField.r(4, HARTINFORdData.nscratch, RegFieldDesc("nscratch", "number of scratch registers", reset=Some(if (cfg.atzero) cfg.nScratch else 0)))
))
//--------------------------------------------------------------
// DMI register decoder for Outer
//--------------------------------------------------------------
// regmap addresses are byte offsets from lowest address
def DMI_DMCONTROL_OFFSET = 0
def DMI_HARTINFO_OFFSET = ((DMI_HARTINFO - DMI_DMCONTROL) << 2)
def DMI_HAWINDOWSEL_OFFSET = ((DMI_HAWINDOWSEL - DMI_DMCONTROL) << 2)
def DMI_HAWINDOW_OFFSET = ((DMI_HAWINDOW - DMI_DMCONTROL) << 2)
val omRegMap = dmiNode.regmap(
DMI_DMCONTROL_OFFSET -> dmControlRegFields,
DMI_HARTINFO_OFFSET -> hartinfoRegFields,
DMI_HAWINDOWSEL_OFFSET -> (if (supportHartArray && (nComponents > 32)) Seq(
WNotifyVal(log2Up(nComponents)-5, HAWINDOWSELReg.hawindowsel, HAWINDOWSELWrData.hawindowsel, HAWINDOWSELWrEn,
RegFieldDesc("hawindowsel", "hart array window select", reset=Some(0)))) else Nil),
DMI_HAWINDOW_OFFSET -> (if (supportHartArray) Seq(
WNotifyVal(if (nComponents > 31) 32 else nComponents, HAWINDOWRdData.maskdata, HAWINDOWWrData.maskdata, HAWINDOWWrEn,
RegFieldDesc("hawindow", "hart array window", reset=Some(0), volatile=(nComponents > 32)))) else Nil)
)
//--------------------------------------------------------------
// Interrupt Registers
//--------------------------------------------------------------
val debugIntNxt = WireInit(VecInit(Seq.fill(nComponents) {false.B} ))
val debugIntRegs = RegNext(next=debugIntNxt, init=0.U.asTypeOf(debugIntNxt)).suggestName("debugIntRegs")
debugIntNxt := debugIntRegs
val (intnode_out, _) = intnode.out.unzip
for (component <- 0 until nComponents) {
intnode_out(component)(0) := debugIntRegs(component) | io.hgDebugInt(component)
}
// sends debug interruption to Core when dmcs.haltreq is set,
for (component <- 0 until nComponents) {
when (~dmactive || ~dmAuthenticated) {
debugIntNxt(component) := false.B
}. otherwise {
when (haltreqWrEn && ((DMCONTROLWrData.hartsello === component.U)
|| (if (supportHartArray) DMCONTROLWrData.hasel && hamask(component) else false.B))) {
debugIntNxt(component) := DMCONTROLWrData.haltreq
}
}
}
// Halt request registers are set & cleared by writes to DMCONTROL.haltreq
// resumereq also causes the core to execute a 'dret',
// so resumereq is passed through to Inner.
// hartsel/hasel/hamask must also be used by the DebugModule state machine,
// so it is passed to Inner.
// These registers ensure that requests to dmInner are not lost if inner clock isn't running or requests occur too close together.
// If the innerCtrl async queue is not ready, the notification will be posted and held until ready is received.
// Additional notifications that occur while one is already waiting update the pending data so that the last value written is sent.
// Volatile events resumereq and ackhavereset are registered when they occur and remain pending until ready is received.
val innerCtrlValid = Wire(Bool())
val innerCtrlValidReg = RegInit(false.B).suggestName("innerCtrlValidReg")
val innerCtrlResumeReqReg = RegInit(false.B).suggestName("innerCtrlResumeReqReg")
val innerCtrlAckHaveResetReg = RegInit(false.B).suggestName("innerCtrlAckHaveResetReg")
innerCtrlValid := hartselloWrEn | resumereqWrEn | ackhaveresetWrEn | setresethaltreqWrEn | clrresethaltreqWrEn | haselWrEn |
(HAWINDOWWrEn & supportHartArray.B)
innerCtrlValidReg := io.innerCtrl.valid & ~io.innerCtrl.ready // Hold innerctrl request until the async queue accepts it
innerCtrlResumeReqReg := io.innerCtrl.bits.resumereq & ~io.innerCtrl.ready // Hold resumereq until accepted
innerCtrlAckHaveResetReg := io.innerCtrl.bits.ackhavereset & ~io.innerCtrl.ready // Hold ackhavereset until accepted
io.innerCtrl.valid := innerCtrlValid | innerCtrlValidReg
io.innerCtrl.bits.hartsel := Mux(hartselloWrEn, DMCONTROLWrData.hartsello, DMCONTROLReg.hartsello)
io.innerCtrl.bits.resumereq := (resumereqWrEn & DMCONTROLWrData.resumereq) | innerCtrlResumeReqReg
io.innerCtrl.bits.ackhavereset := (ackhaveresetWrEn & DMCONTROLWrData.ackhavereset) | innerCtrlAckHaveResetReg
io.innerCtrl.bits.hrmask := hrmask
if (supportHartArray) {
io.innerCtrl.bits.hasel := Mux(haselWrEn, DMCONTROLWrData.hasel, DMCONTROLReg.hasel)
io.innerCtrl.bits.hamask := hamask
} else {
io.innerCtrl.bits.hasel := DontCare
io.innerCtrl.bits.hamask := DontCare
}
io.ctrl.ndreset := DMCONTROLReg.ndmreset
io.ctrl.dmactive := DMCONTROLReg.dmactive
// hart reset mechanism implementation
if (cfg.hasHartResets) {
val hartResetNxt = Wire(Vec(nComponents, Bool()))
val hartResetReg = RegNext(next=hartResetNxt, init=0.U.asTypeOf(hartResetNxt))
for (component <- 0 until nComponents) {
hartResetNxt(component) := DMCONTROLReg.hartreset & hartSelected(component)
io.hartResetReq.get(component) := hartResetReg(component)
}
}
omRegMap // FIXME: Remove this when withReset is removed
}}
}
// wrap a Outer with a DMIToTL, derived by dmi clock & reset
class TLDebugModuleOuterAsync(device: Device)(implicit p: Parameters) extends LazyModule {
val cfg = p(DebugModuleKey).get
val dmiXbar = LazyModule (new TLXbar(nameSuffix = Some("dmixbar")))
val dmi2tlOpt = (!p(ExportDebug).apb).option({
val dmi2tl = LazyModule(new DMIToTL())
dmiXbar.node := dmi2tl.node
dmi2tl
})
val apbNodeOpt = p(ExportDebug).apb.option({
val apb2tl = LazyModule(new APBToTL())
val apb2tlBuffer = LazyModule(new TLBuffer(BufferParams.pipe))
val dmTopAddr = (1 << cfg.nDMIAddrSize) << 2
val tlErrorParams = DevNullParams(AddressSet.misaligned(dmTopAddr, APBDebugConsts.apbDebugRegBase-dmTopAddr), maxAtomic=0, maxTransfer=4)
val tlError = LazyModule(new TLError(tlErrorParams, buffer=false))
val apbXbar = LazyModule(new APBFanout())
val apbRegs = LazyModule(new APBDebugRegisters())
apbRegs.node := apbXbar.node
apb2tl.node := apbXbar.node
apb2tlBuffer.node := apb2tl.node
dmiXbar.node := apb2tlBuffer.node
tlError.node := dmiXbar.node
apbXbar.node
})
val dmOuter = LazyModule( new TLDebugModuleOuter(device))
val intnode = IntSyncIdentityNode()
intnode :*= IntSyncCrossingSource(alreadyRegistered = true) :*= dmOuter.intnode
val dmiBypass = LazyModule(new TLBusBypass(beatBytes=4, bufferError=false, maxAtomic=0, maxTransfer=4))
val dmiInnerNode = TLAsyncCrossingSource() := dmiBypass.node := dmiXbar.node
dmOuter.dmiNode := dmiXbar.node
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val nComponents = dmOuter.intnode.edges.out.size
val io = IO(new Bundle {
val dmi_clock = Input(Clock())
val dmi_reset = Input(Reset())
      /** Debug Module Interface between DM and DTM
*
* The DTM provides access to one or more Debug Modules (DMs) using DMI
*/
val dmi = (!p(ExportDebug).apb).option(Flipped(new DMIIO()(p)))
// Optional APB Interface is fully diplomatic so is not listed here.
val ctrl = new DebugCtrlBundle(nComponents)
      /** control signals for Inner, generated in Outer */
val innerCtrl = new AsyncBundle(new DebugInternalBundle(nComponents), AsyncQueueParams.singleton(safe=cfg.crossingHasSafeReset))
/** debug interruption generated in Inner */
val hgDebugInt = Input(Vec(nComponents, Bool()))
/** hart reset request to core */
val hartResetReq = p(DebugModuleKey).get.hasHartResets.option(Output(Vec(nComponents, Bool())))
/** Authentication signal from core */
val dmAuthenticated = p(DebugModuleKey).get.hasAuthentication.option(Input(Bool()))
})
val rf_reset = IO(Input(Reset())) // RF transform
childClock := io.dmi_clock
childReset := io.dmi_reset
override def provideImplicitClockToLazyChildren = true
withClockAndReset(childClock, childReset) {
dmi2tlOpt.foreach { _.module.io.dmi <> io.dmi.get }
val dmactiveAck = AsyncResetSynchronizerShiftReg(in=io.ctrl.dmactiveAck, sync=3, name=Some("dmactiveAckSync"))
dmiBypass.module.io.bypass := ~io.ctrl.dmactive | ~dmactiveAck
io.ctrl <> dmOuter.module.io.ctrl
dmOuter.module.io.ctrl.dmactiveAck := dmactiveAck // send synced version down to dmOuter
io.innerCtrl <> ToAsyncBundle(dmOuter.module.io.innerCtrl, AsyncQueueParams.singleton(safe=cfg.crossingHasSafeReset))
dmOuter.module.io.hgDebugInt := io.hgDebugInt
io.hartResetReq.foreach { x => dmOuter.module.io.hartResetReq.foreach {y => x := y}}
io.dmAuthenticated.foreach { x => dmOuter.module.io.dmAuthenticated.foreach { y => y := x}}
}
}
}
class TLDebugModuleInner(device: Device, getNComponents: () => Int, beatBytes: Int)(implicit p: Parameters) extends LazyModule
{
// For Shorter Register Names
import DMI_RegAddrs._
val cfg = p(DebugModuleKey).get
def getCfg = () => cfg
val dmTopAddr = (1 << cfg.nDMIAddrSize) << 2
/** dmiNode address set */
val dmiNode = TLRegisterNode(
// Address is range 0 to 0x1FF except DMCONTROL, HARTINFO, HAWINDOWSEL, HAWINDOW which are handled by Outer
address = AddressSet.misaligned(0, DMI_DMCONTROL << 2) ++
AddressSet.misaligned((DMI_DMCONTROL + 1) << 2, ((DMI_HARTINFO << 2) - ((DMI_DMCONTROL + 1) << 2))) ++
AddressSet.misaligned((DMI_HARTINFO + 1) << 2, ((DMI_HAWINDOWSEL << 2) - ((DMI_HARTINFO + 1) << 2))) ++
AddressSet.misaligned((DMI_HAWINDOW + 1) << 2, (dmTopAddr - ((DMI_HAWINDOW + 1) << 2))),
device = device,
beatBytes = 4,
executable = false
)
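// Worked example (illustrative; assumes the default nDMIAddrSize = 7, so dmTopAddr = 0x200, and the
// spec word addresses DMCONTROL=0x10, HARTINFO=0x12, HAWINDOWSEL=0x14, HAWINDOW=0x15): the byte
// ranges claimed above are [0x000,0x040), [0x044,0x048), [0x04C,0x050) and [0x058,0x200), i.e.
// every DMI register except the four Outer-owned ones at byte offsets 0x40, 0x48, 0x50 and 0x54.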
val tlNode = TLRegisterNode(
address=Seq(cfg.address),
device=device,
beatBytes=beatBytes,
executable=true
)
val sb2tlOpt = cfg.hasBusMaster.option(LazyModule(new SBToTL()))
// If we want to support custom registers read through Abstract Commands,
// provide a place to bring them into the debug module. What this connects
// to is up to the implementation.
val customNode = new DebugCustomSink()
lazy val module = new Impl
class Impl extends LazyModuleImp(this){
val nComponents = getNComponents()
Annotated.params(this, cfg)
val supportHartArray = cfg.supportHartArray & (nComponents > 1)
val nExtTriggers = cfg.nExtTriggers
val nHaltGroups = if ((nComponents > 1) | (nExtTriggers > 0)) cfg.nHaltGroups
else 0 // no halt groups possible if single hart with no external triggers
val hartSelFuncs = if (getNComponents() > 1) p(DebugModuleHartSelKey) else DebugModuleHartSelFuncs(
hartIdToHartSel = (x) => 0.U,
hartSelToHartId = (x) => x
)
val io = IO(new Bundle {
/** dm reset signal passed in from Outer */
val dmactive = Input(Bool())
/** control signals for Inner
*
* generated by Outer and passed in
*/
val innerCtrl = Flipped(new DecoupledIO(new DebugInternalBundle(nComponents)))
/** debug unavail signal passed in from Outer*/
val debugUnavail = Input(Vec(nComponents, Bool()))
/** debug interruption from Inner to Outer
*
* contains two types of debug interrupt causes:
* - halt group
* - halt-on-reset
*/
val hgDebugInt = Output(Vec(nComponents, Bool()))
/** interface for trigger */
val extTrigger = (nExtTriggers > 0).option(new DebugExtTriggerIO())
/** vector to indicate which hart is in reset
*
* the DM receives it from the core and forwards it to Inner
*/
val hartIsInReset = Input(Vec(nComponents, Bool()))
val tl_clock = Input(Clock())
val tl_reset = Input(Reset())
/** Debug Authentication signals from core */
val auth = cfg.hasAuthentication.option(new DebugAuthenticationIO())
})
sb2tlOpt.map { sb =>
sb.module.clock := io.tl_clock
sb.module.reset := io.tl_reset
sb.module.rf_reset := io.tl_reset
}
//--------------------------------------------------------------
// Import constants for shorter variable names
//--------------------------------------------------------------
import DMI_RegAddrs._
import DsbRegAddrs._
import DsbBusConsts._
//--------------------------------------------------------------
// Sanity Check Configuration For this implementation.
//--------------------------------------------------------------
require (cfg.supportQuickAccess == false, "No Quick Access support yet")
require ((nHaltGroups > 0) || (nExtTriggers == 0), "External triggers require at least 1 halt group")
//--------------------------------------------------------------
// Register & Wire Declarations (which need to be pre-declared)
//--------------------------------------------------------------
// run control regs: track the run-control state of all harts
// (set by harts executing code; see the implementation-specific bits section below)
/** all harts halted status */
val haltedBitRegs = Reg(UInt(nComponents.W))
/** all harts resume request status */
val resumeReqRegs = Reg(UInt(nComponents.W))
/** all harts have reset status */
val haveResetBitRegs = Reg(UInt(nComponents.W))
// default is 1; after a resume request, resumeAcks goes to 0 until the hart resumes
/** all harts resume ack status */
val resumeAcks = Wire(UInt(nComponents.W))
// --- regmapper outputs
// hart state Id and En
// in Hart Bus Access ROM
val hartHaltedWrEn = Wire(Bool())
val hartHaltedId = Wire(UInt(sbIdWidth.W))
val hartGoingWrEn = Wire(Bool())
val hartGoingId = Wire(UInt(sbIdWidth.W))
val hartResumingWrEn = Wire(Bool())
val hartResumingId = Wire(UInt(sbIdWidth.W))
val hartExceptionWrEn = Wire(Bool())
val hartExceptionId = Wire(UInt(sbIdWidth.W))
// progbuf and abstract data: byte-addressable control logic
// AccessLegal is set only when state = waiting
// RdEn and WrEnMaybe: control signals driven by the DMI bus
val dmiProgramBufferRdEn = WireInit(VecInit(Seq.fill(cfg.nProgramBufferWords * 4) {false.B} ))
val dmiProgramBufferAccessLegal = WireInit(false.B)
val dmiProgramBufferWrEnMaybe = WireInit(VecInit(Seq.fill(cfg.nProgramBufferWords * 4) {false.B} ))
val dmiAbstractDataRdEn = WireInit(VecInit(Seq.fill(cfg.nAbstractDataWords * 4) {false.B} ))
val dmiAbstractDataAccessLegal = WireInit(false.B)
val dmiAbstractDataWrEnMaybe = WireInit(VecInit(Seq.fill(cfg.nAbstractDataWords * 4) {false.B} ))
//--------------------------------------------------------------
// Registers coming from 'CONTROL' in Outer
//--------------------------------------------------------------
val dmAuthenticated = io.auth.map(a => a.dmAuthenticated).getOrElse(true.B)
val selectedHartReg = Reg(UInt(p(MaxHartIdBits).W))
// hamaskFull is a vector of all selected harts including hartsel, whether or not supportHartArray is true
val hamaskFull = WireInit(VecInit(Seq.fill(nComponents) {false.B} ))
if (nComponents > 1) {
when (~io.dmactive) {
selectedHartReg := 0.U
}.elsewhen (io.innerCtrl.fire){
selectedHartReg := io.innerCtrl.bits.hartsel
}
}
if (supportHartArray) {
val hamaskZero = WireInit(VecInit(Seq.fill(nComponents) {false.B} ))
val hamaskReg = Reg(Vec(nComponents, Bool()))
when (~io.dmactive || ~dmAuthenticated) {
hamaskReg := hamaskZero
}.elsewhen (io.innerCtrl.fire){
hamaskReg := Mux(io.innerCtrl.bits.hasel, io.innerCtrl.bits.hamask, hamaskZero)
}
hamaskFull := hamaskReg
}
// Outer.hamask doesn't consider the hart selected by dmcontrol.hartsello,
// so append it here
when (selectedHartReg < nComponents.U) {
hamaskFull(if (nComponents == 1) 0.U(0.W) else selectedHartReg) := true.B
}
io.innerCtrl.ready := true.B
// Construct a Vec from io.innerCtrl fields indicating whether each hart is being selected in this write
// A hart may be selected by hartsel field or by hart array
val hamaskWrSel = WireInit(VecInit(Seq.fill(nComponents) {false.B} ))
for (component <- 0 until nComponents ) {
hamaskWrSel(component) := ((io.innerCtrl.bits.hartsel === component.U) ||
(if (supportHartArray) io.innerCtrl.bits.hasel && io.innerCtrl.bits.hamask(component) else false.B))
}
//-------------------------------------
// Halt-on-reset logic
// hrmask is set in dmOuter and passed in
// Debug interrupt is generated when a reset occurs whose corresponding hrmask bit is set
// Debug interrupt is maintained until the hart enters halted state
//-------------------------------------
val hrReset = WireInit(VecInit(Seq.fill(nComponents) { false.B } ))
val hrDebugInt = Wire(Vec(nComponents, Bool()))
val hrmaskReg = RegInit(hrReset)
val hartIsInResetSync = Wire(Vec(nComponents, Bool()))
for (component <- 0 until nComponents) {
hartIsInResetSync(component) := AsyncResetSynchronizerShiftReg(io.hartIsInReset(component), 3, Some(s"debug_hartReset_$component"))
}
when (~io.dmactive || ~dmAuthenticated) {
hrmaskReg := hrReset
}.elsewhen (io.innerCtrl.fire){
hrmaskReg := io.innerCtrl.bits.hrmask
}
withReset(reset.asAsyncReset) { // ensure interrupt requests are negated at first clock edge
val hrDebugIntReg = RegInit(VecInit(Seq.fill(nComponents) { false.B } ))
when (~io.dmactive || ~dmAuthenticated) {
hrDebugIntReg := hrReset
}.otherwise {
hrDebugIntReg := hrmaskReg &
(hartIsInResetSync | // set debugInt during reset
(hrDebugIntReg & ~(haltedBitRegs.asBools))) // maintain until core halts
}
hrDebugInt := hrDebugIntReg
}
//--------------------------------------------------------------
// DMI Registers
//--------------------------------------------------------------
//----DMSTATUS
val DMSTATUSRdData = WireInit(0.U.asTypeOf(new DMSTATUSFields()))
DMSTATUSRdData.authenticated := dmAuthenticated
DMSTATUSRdData.version := 2.U // Version 0.13
io.auth.map(a => DMSTATUSRdData.authbusy := a.dmAuthBusy)
val resumereq = io.innerCtrl.fire && io.innerCtrl.bits.resumereq
when (dmAuthenticated) {
DMSTATUSRdData.hasresethaltreq := true.B
DMSTATUSRdData.anynonexistent := (selectedHartReg >= nComponents.U) // only hartsel can be nonexistent
// all harts nonexistent if hartsel is out of range and there are no harts selected in the hart array
DMSTATUSRdData.allnonexistent := (selectedHartReg >= nComponents.U) & (~hamaskFull.reduce(_ | _))
when (~DMSTATUSRdData.allnonexistent) { // if no existent harts selected, all other status is false
DMSTATUSRdData.anyunavail := (io.debugUnavail & hamaskFull).reduce(_ | _)
DMSTATUSRdData.anyhalted := ((~io.debugUnavail & (haltedBitRegs.asBools)) & hamaskFull).reduce(_ | _)
DMSTATUSRdData.anyrunning := ((~io.debugUnavail & ~(haltedBitRegs.asBools)) & hamaskFull).reduce(_ | _)
DMSTATUSRdData.anyhavereset := (haveResetBitRegs.asBools & hamaskFull).reduce(_ | _)
DMSTATUSRdData.anyresumeack := (resumeAcks.asBools & hamaskFull).reduce(_ | _)
when (~DMSTATUSRdData.anynonexistent) { // if one hart is nonexistent, no 'all' status is set
DMSTATUSRdData.allunavail := (io.debugUnavail | ~hamaskFull).reduce(_ & _)
DMSTATUSRdData.allhalted := ((~io.debugUnavail & (haltedBitRegs.asBools)) | ~hamaskFull).reduce(_ & _)
DMSTATUSRdData.allrunning := ((~io.debugUnavail & ~(haltedBitRegs.asBools)) | ~hamaskFull).reduce(_ & _)
DMSTATUSRdData.allhavereset := (haveResetBitRegs.asBools | ~hamaskFull).reduce(_ & _)
DMSTATUSRdData.allresumeack := (resumeAcks.asBools | ~hamaskFull).reduce(_ & _)
}
}
//TODO
DMSTATUSRdData.confstrptrvalid := false.B
DMSTATUSRdData.impebreak := (cfg.hasImplicitEbreak).B
}
when(~io.dmactive || ~dmAuthenticated) {
haveResetBitRegs := 0.U
}.otherwise {
when (io.innerCtrl.fire && io.innerCtrl.bits.ackhavereset) {
haveResetBitRegs := (haveResetBitRegs & (~(hamaskWrSel.asUInt))) | hartIsInResetSync.asUInt
}.otherwise {
haveResetBitRegs := haveResetBitRegs | hartIsInResetSync.asUInt
}
}
//----DMCS2 (Halt Groups)
val DMCS2RdData = WireInit(0.U.asTypeOf(new DMCS2Fields()))
val DMCS2WrData = WireInit(0.U.asTypeOf(new DMCS2Fields()))
val hgselectWrEn = WireInit(false.B)
val hgwriteWrEn = WireInit(false.B)
val haltgroupWrEn = WireInit(false.B)
val exttriggerWrEn = WireInit(false.B)
val hgDebugInt = WireInit(VecInit(Seq.fill(nComponents) {false.B} ))
if (nHaltGroups > 0) withReset (reset.asAsyncReset) { // async reset ensures triggers don't falsely fire during startup
val hgBits = log2Up(nHaltGroups)
// hgParticipate: Each entry indicates which hg that entity belongs to (1 to nHaltGroups). 0 means no hg assigned.
val hgParticipateHart = RegInit(VecInit(Seq.fill(nComponents)(0.U(hgBits.W))))
val hgParticipateTrig = if (nExtTriggers > 0) RegInit(VecInit(Seq.fill(nExtTriggers)(0.U(hgBits.W)))) else Nil
// assign a halt group index to the currently selected harts
for (component <- 0 until nComponents) {
when (~io.dmactive || ~dmAuthenticated) {
hgParticipateHart(component) := 0.U
}.otherwise {
when (haltgroupWrEn & DMCS2WrData.hgwrite & ~DMCS2WrData.hgselect &
hamaskFull(component) & (DMCS2WrData.haltgroup <= nHaltGroups.U)) {
hgParticipateHart(component) := DMCS2WrData.haltgroup
}
}
}
DMCS2RdData.haltgroup := hgParticipateHart(if (nComponents == 1) 0.U(0.W) else selectedHartReg)
if (nExtTriggers > 0) {
val hgSelect = Reg(Bool())
when (~io.dmactive || ~dmAuthenticated) {
hgSelect := false.B
}.otherwise {
when (hgselectWrEn) {
hgSelect := DMCS2WrData.hgselect
}
}
// assign group index to trigger
for (trigger <- 0 until nExtTriggers) {
when (~io.dmactive || ~dmAuthenticated) {
hgParticipateTrig(trigger) := 0.U
}.otherwise {
when (haltgroupWrEn & DMCS2WrData.hgwrite & DMCS2WrData.hgselect &
(DMCS2WrData.exttrigger === trigger.U) & (DMCS2WrData.haltgroup <= nHaltGroups.U)) {
hgParticipateTrig(trigger) := DMCS2WrData.haltgroup
}
}
}
DMCS2RdData.hgselect := hgSelect
when (hgSelect) {
DMCS2RdData.haltgroup := hgParticipateTrig(0)
}
// If there is only 1 ext trigger, then the exttrigger field is fixed at 0
// Otherwise, instantiate a register with only the number of bits required
if (nExtTriggers > 1) {
val trigBits = log2Up(nExtTriggers-1)
val hgExtTrigger = Reg(UInt(trigBits.W))
when (~io.dmactive || ~dmAuthenticated) {
hgExtTrigger := 0.U
}.otherwise {
when (exttriggerWrEn & (DMCS2WrData.exttrigger < nExtTriggers.U)) {
hgExtTrigger := DMCS2WrData.exttrigger
}
}
DMCS2RdData.exttrigger := hgExtTrigger
when (hgSelect) {
DMCS2RdData.haltgroup := hgParticipateTrig(hgExtTrigger)
}
}
}
// Halt group state machine
// IDLE: Go to FIRED when any hart in this hg writes to HALTED while its HaltedBitRegs=0
// or when any trigin assigned to this hg occurs
// FIRED: Back to IDLE when all harts in this hg have set their haltedBitRegs
// and all trig out in this hg have been acknowledged
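// Worked example (illustrative): if harts 0 and 1 are both assigned to halt group 1 and hart 0
// halts (it writes HALTED while its haltedBitRegs bit is still clear), hgFired(1) is set and a
// debug interrupt is driven to hart 1 and to any trigger output in group 1; hgFired(1) clears
// again only after both harts show up in haltedBitRegs and every trigger output in the group
// has been acknowledged.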
val hgFired = RegInit (VecInit(Seq.fill(nHaltGroups+1) {false.B} ))
val hgHartFiring = WireInit(VecInit(Seq.fill(nHaltGroups+1) {false.B} )) // which hg's are firing due to hart halting
val hgTrigFiring = WireInit(VecInit(Seq.fill(nHaltGroups+1) {false.B} )) // which hg's are firing due to trig in
val hgHartsAllHalted = WireInit(VecInit(Seq.fill(nHaltGroups+1) {false.B} )) // in which hg's have all harts halted
val hgTrigsAllAcked = WireInit(VecInit(Seq.fill(nHaltGroups+1) { true.B} )) // in which hg's have all trigouts been acked
io.extTrigger.foreach {extTrigger =>
val extTriggerInReq = Wire(Vec(nExtTriggers, Bool()))
val extTriggerOutAck = Wire(Vec(nExtTriggers, Bool()))
extTriggerInReq := extTrigger.in.req.asBools
extTriggerOutAck := extTrigger.out.ack.asBools
val trigInReq = ResetSynchronizerShiftReg(in=extTriggerInReq, sync=3, name=Some("dm_extTriggerInReqSync"))
val trigOutAck = ResetSynchronizerShiftReg(in=extTriggerOutAck, sync=3, name=Some("dm_extTriggerOutAckSync"))
for (hg <- 1 to nHaltGroups) {
hgTrigFiring(hg) := (trigInReq & ~RegNext(trigInReq) & hgParticipateTrig.map(_ === hg.U)).reduce(_ | _)
hgTrigsAllAcked(hg) := (trigOutAck | hgParticipateTrig.map(_ =/= hg.U)).reduce(_ & _)
}
extTrigger.in.ack := trigInReq.asUInt
}
for (hg <- 1 to nHaltGroups) {
hgHartFiring(hg) := hartHaltedWrEn & ~haltedBitRegs(hartHaltedId) & (hgParticipateHart(hartSelFuncs.hartIdToHartSel(hartHaltedId)) === hg.U)
hgHartsAllHalted(hg) := (haltedBitRegs.asBools | hgParticipateHart.map(_ =/= hg.U)).reduce(_ & _)
when (~io.dmactive || ~dmAuthenticated) {
hgFired(hg) := false.B
}.elsewhen (~hgFired(hg) & (hgHartFiring(hg) | hgTrigFiring(hg))) {
hgFired(hg) := true.B
}.elsewhen ( hgFired(hg) & hgHartsAllHalted(hg) & hgTrigsAllAcked(hg)) {
hgFired(hg) := false.B
}
}
// For each hg that has fired, assert debug interrupt to each hart in that hg
for (component <- 0 until nComponents) {
hgDebugInt(component) := hgFired(hgParticipateHart(component))
}
// For each hg that has fired, assert trigger out for all external triggers in that hg
io.extTrigger.foreach {extTrigger =>
val extTriggerOutReq = RegInit(VecInit(Seq.fill(cfg.nExtTriggers) {false.B} ))
for (trig <- 0 until nExtTriggers) {
extTriggerOutReq(trig) := hgFired(hgParticipateTrig(trig))
}
extTrigger.out.req := extTriggerOutReq.asUInt
}
}
io.hgDebugInt := hgDebugInt | hrDebugInt
//----HALTSUM*
val numHaltedStatus = ((nComponents - 1) / 32) + 1
val haltedStatus = Wire(Vec(numHaltedStatus, Bits(32.W)))
for (ii <- 0 until numHaltedStatus) {
when (dmAuthenticated) {
haltedStatus(ii) := haltedBitRegs >> (ii*32)
}.otherwise {
haltedStatus(ii) := 0.U
}
}
val haltedSummary = Cat(haltedStatus.map(_.orR).reverse)
val HALTSUM1RdData = haltedSummary.asTypeOf(new HALTSUM1Fields())
val selectedHaltedStatus = Mux((selectedHartReg >> 5) > numHaltedStatus.U, 0.U, haltedStatus(selectedHartReg >> 5))
val HALTSUM0RdData = selectedHaltedStatus.asTypeOf(new HALTSUM0Fields())
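// Illustrative example: with 40 harts, haltedStatus holds two 32-bit words; each HALTSUM1 bit
// then reports whether any hart in the corresponding 32-hart window is halted, and HALTSUM0
// returns the 32-bit window selected by hartsel >> 5.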
// Since we only support 1024 harts, we don't implement HALTSUM2 or HALTSUM3
//----ABSTRACTCS
val ABSTRACTCSReset = WireInit(0.U.asTypeOf(new ABSTRACTCSFields()))
ABSTRACTCSReset.datacount := cfg.nAbstractDataWords.U
ABSTRACTCSReset.progbufsize := cfg.nProgramBufferWords.U
val ABSTRACTCSReg = Reg(new ABSTRACTCSFields())
val ABSTRACTCSWrData = WireInit(0.U.asTypeOf(new ABSTRACTCSFields()))
val ABSTRACTCSRdData = WireInit(ABSTRACTCSReg)
val ABSTRACTCSRdEn = WireInit(false.B)
val ABSTRACTCSWrEnMaybe = WireInit(false.B)
val ABSTRACTCSWrEnLegal = WireInit(false.B)
val ABSTRACTCSWrEn = ABSTRACTCSWrEnMaybe && ABSTRACTCSWrEnLegal
// multiple error types
// set by the abstract command state machine below
val errorBusy = WireInit(false.B)
val errorException = WireInit(false.B)
val errorUnsupported = WireInit(false.B)
val errorHaltResume = WireInit(false.B)
when (~io.dmactive || ~dmAuthenticated) {
ABSTRACTCSReg := ABSTRACTCSReset
}.otherwise {
when (errorBusy){
ABSTRACTCSReg.cmderr := DebugAbstractCommandError.ErrBusy.id.U
}.elsewhen (errorException) {
ABSTRACTCSReg.cmderr := DebugAbstractCommandError.ErrException.id.U
}.elsewhen (errorUnsupported) {
ABSTRACTCSReg.cmderr := DebugAbstractCommandError.ErrNotSupported.id.U
}.elsewhen (errorHaltResume) {
ABSTRACTCSReg.cmderr := DebugAbstractCommandError.ErrHaltResume.id.U
}.otherwise {
//W1C
when (ABSTRACTCSWrEn){
ABSTRACTCSReg.cmderr := ABSTRACTCSReg.cmderr & ~(ABSTRACTCSWrData.cmderr);
}
}
}
// For busy, see below state machine.
val abstractCommandBusy = WireInit(true.B)
ABSTRACTCSRdData.busy := abstractCommandBusy
when (~dmAuthenticated) { // read value must be 0 when not authenticated
ABSTRACTCSRdData.datacount := 0.U
ABSTRACTCSRdData.progbufsize := 0.U
}
//---- ABSTRACTAUTO
// ABSTRACTAUTO is a mask indicating whether each data[i]/progbuf[i] register has autoexec permission
// this part produces 3 wires: autoexecData, autoexecProg, autoexec
// the first two indicate, per register, an access to an autoexec-enabled register
// autoexec means at least one autoexec-enabled register was accessed
// when autoexec is set, instructions are regenerated from the COMMAND register
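// Illustrative use: if the debugger sets ABSTRACTAUTO.autoexecdata(0), every later DMI access to
// any byte of DATA0 pulses autoexecData(0), so autoexec re-issues the command already held in
// COMMAND; this is how block register/memory transfers avoid rewriting COMMAND for every word.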
val ABSTRACTAUTOReset = WireInit(0.U.asTypeOf(new ABSTRACTAUTOFields()))
val ABSTRACTAUTOReg = Reg(new ABSTRACTAUTOFields())
val ABSTRACTAUTOWrData = WireInit(0.U.asTypeOf(new ABSTRACTAUTOFields()))
val ABSTRACTAUTORdData = WireInit(ABSTRACTAUTOReg)
val ABSTRACTAUTORdEn = WireInit(false.B)
val autoexecdataWrEnMaybe = WireInit(false.B)
val autoexecprogbufWrEnMaybe = WireInit(false.B)
val ABSTRACTAUTOWrEnLegal = WireInit(false.B)
when (~io.dmactive || ~dmAuthenticated) {
ABSTRACTAUTOReg := ABSTRACTAUTOReset
}.otherwise {
when (autoexecprogbufWrEnMaybe && ABSTRACTAUTOWrEnLegal) {
ABSTRACTAUTOReg.autoexecprogbuf := ABSTRACTAUTOWrData.autoexecprogbuf & ( (1 << cfg.nProgramBufferWords) - 1).U
}
when (autoexecdataWrEnMaybe && ABSTRACTAUTOWrEnLegal) {
ABSTRACTAUTOReg.autoexecdata := ABSTRACTAUTOWrData.autoexecdata & ( (1 << cfg.nAbstractDataWords) - 1).U
}
}
// Abstract Data access vector(byte-addressable)
val dmiAbstractDataAccessVec = WireInit(VecInit(Seq.fill(cfg.nAbstractDataWords * 4) {false.B} ))
dmiAbstractDataAccessVec := (dmiAbstractDataWrEnMaybe zip dmiAbstractDataRdEn).map{ case (r,w) => r | w}
// Program Buffer access vector(byte-addressable)
val dmiProgramBufferAccessVec = WireInit(VecInit(Seq.fill(cfg.nProgramBufferWords * 4) {false.B} ))
dmiProgramBufferAccessVec := (dmiProgramBufferWrEnMaybe zip dmiProgramBufferRdEn).map{ case (r,w) => r | w}
// true if any byte of the corresponding region is accessed
val dmiAbstractDataAccess = dmiAbstractDataAccessVec.reduce(_ || _ )
val dmiProgramBufferAccess = dmiProgramBufferAccessVec.reduce(_ || _)
// This will take the shorter of the lists, which is what we want.
val autoexecData = WireInit(VecInit(Seq.fill(cfg.nAbstractDataWords) {false.B} ))
val autoexecProg = WireInit(VecInit(Seq.fill(cfg.nProgramBufferWords) {false.B} ))
(autoexecData zip ABSTRACTAUTOReg.autoexecdata.asBools).zipWithIndex.foreach {case (t, i) => t._1 := dmiAbstractDataAccessVec(i * 4) && t._2 }
(autoexecProg zip ABSTRACTAUTOReg.autoexecprogbuf.asBools).zipWithIndex.foreach {case (t, i) => t._1 := dmiProgramBufferAccessVec(i * 4) && t._2}
val autoexec = autoexecData.reduce(_ || _) || autoexecProg.reduce(_ || _)
//---- COMMAND
val COMMANDReset = WireInit(0.U.asTypeOf(new COMMANDFields()))
val COMMANDReg = Reg(new COMMANDFields())
val COMMANDWrDataVal = WireInit(0.U(32.W))
val COMMANDWrData = WireInit(COMMANDWrDataVal.asTypeOf(new COMMANDFields()))
val COMMANDWrEnMaybe = WireInit(false.B)
val COMMANDWrEnLegal = WireInit(false.B)
val COMMANDRdEn = WireInit(false.B)
val COMMANDWrEn = COMMANDWrEnMaybe && COMMANDWrEnLegal
val COMMANDRdData = COMMANDReg
when (~io.dmactive || ~dmAuthenticated) {
COMMANDReg := COMMANDReset
}.otherwise {
when (COMMANDWrEn) {
COMMANDReg := COMMANDWrData
}
}
// --- Abstract Data
// These are byte-addressable, so that the processor can use
// byte-sized accesses to store to them.
val abstractDataMem = Reg(Vec(cfg.nAbstractDataWords*4, UInt(8.W)))
val abstractDataNxt = WireInit(abstractDataMem)
// --- Program Buffer
// byte-addressable memory
val programBufferMem = Reg(Vec(cfg.nProgramBufferWords*4, UInt(8.W)))
val programBufferNxt = WireInit(programBufferMem)
//--------------------------------------------------------------
// These bits are implementation-specific bits set
// by harts executing code.
//--------------------------------------------------------------
// Run control logic
when (~io.dmactive || ~dmAuthenticated) {
haltedBitRegs := 0.U
resumeReqRegs := 0.U
}.otherwise {
//remove those harts in reset
resumeReqRegs := resumeReqRegs & ~(hartIsInResetSync.asUInt)
val hartHaltedIdIndex = UIntToOH(hartSelFuncs.hartIdToHartSel(hartHaltedId))
val hartResumingIdIndex = UIntToOH(hartSelFuncs.hartIdToHartSel(hartResumingId))
val hartselIndex = UIntToOH(io.innerCtrl.bits.hartsel)
when (hartHaltedWrEn) {
// add those harts halting and remove those in reset
haltedBitRegs := (haltedBitRegs | hartHaltedIdIndex) & ~(hartIsInResetSync.asUInt)
}.elsewhen (hartResumingWrEn) {
// remove those harts in reset and those in resume
haltedBitRegs := (haltedBitRegs & ~(hartResumingIdIndex)) & ~(hartIsInResetSync.asUInt)
}.otherwise {
// remove those harts in reset
haltedBitRegs := haltedBitRegs & ~(hartIsInResetSync.asUInt)
}
when (hartResumingWrEn) {
// remove those harts in resume and those in reset
resumeReqRegs := (resumeReqRegs & ~(hartResumingIdIndex)) & ~(hartIsInResetSync.asUInt)
}
when (resumereq) {
// set resumeReq for all selected harts, remove those in reset
resumeReqRegs := (resumeReqRegs | hamaskWrSel.asUInt) & ~(hartIsInResetSync.asUInt)
}
}
when (resumereq) {
// next cycle resumeAcks will be the negation of next cycle resumeReqRegs
resumeAcks := (~resumeReqRegs & ~(hamaskWrSel.asUInt))
}.otherwise {
resumeAcks := ~resumeReqRegs
}
//---- AUTHDATA
val authRdEnMaybe = WireInit(false.B)
val authWrEnMaybe = WireInit(false.B)
io.auth.map { a =>
a.dmactive := io.dmactive
a.dmAuthRead := authRdEnMaybe & ~a.dmAuthBusy
a.dmAuthWrite := authWrEnMaybe & ~a.dmAuthBusy
}
val dmstatusRegFields = RegFieldGroup("dmi_dmstatus", Some("debug module status register"), Seq(
RegField.r(4, DMSTATUSRdData.version, RegFieldDesc("version", "version", reset=Some(2))),
RegField.r(1, DMSTATUSRdData.confstrptrvalid, RegFieldDesc("confstrptrvalid", "confstrptrvalid", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.hasresethaltreq, RegFieldDesc("hasresethaltreq", "hasresethaltreq", reset=Some(1))),
RegField.r(1, DMSTATUSRdData.authbusy, RegFieldDesc("authbusy", "authbusy", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.authenticated, RegFieldDesc("authenticated", "authenticated", reset=Some(1))),
RegField.r(1, DMSTATUSRdData.anyhalted, RegFieldDesc("anyhalted", "anyhalted", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.allhalted, RegFieldDesc("allhalted", "allhalted", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.anyrunning, RegFieldDesc("anyrunning", "anyrunning", reset=Some(1))),
RegField.r(1, DMSTATUSRdData.allrunning, RegFieldDesc("allrunning", "allrunning", reset=Some(1))),
RegField.r(1, DMSTATUSRdData.anyunavail, RegFieldDesc("anyunavail", "anyunavail", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.allunavail, RegFieldDesc("allunavail", "allunavail", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.anynonexistent, RegFieldDesc("anynonexistent", "anynonexistent", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.allnonexistent, RegFieldDesc("allnonexistent", "allnonexistent", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.anyresumeack, RegFieldDesc("anyresumeack", "anyresumeack", reset=Some(1))),
RegField.r(1, DMSTATUSRdData.allresumeack, RegFieldDesc("allresumeack", "allresumeack", reset=Some(1))),
RegField.r(1, DMSTATUSRdData.anyhavereset, RegFieldDesc("anyhavereset", "anyhavereset", reset=Some(0))),
RegField.r(1, DMSTATUSRdData.allhavereset, RegFieldDesc("allhavereset", "allhavereset", reset=Some(0))),
RegField(2),
RegField.r(1, DMSTATUSRdData.impebreak, RegFieldDesc("impebreak", "impebreak", reset=Some(if (cfg.hasImplicitEbreak) 1 else 0)))
))
val dmcs2RegFields = RegFieldGroup("dmi_dmcs2", Some("debug module control/status register 2"), Seq(
WNotifyVal(1, DMCS2RdData.hgselect, DMCS2WrData.hgselect, hgselectWrEn,
RegFieldDesc("hgselect", "select halt groups or external triggers", reset=Some(0), volatile=true)),
WNotifyVal(1, 0.U, DMCS2WrData.hgwrite, hgwriteWrEn,
RegFieldDesc("hgwrite", "write 1 to change halt groups", reset=None, access=RegFieldAccessType.W)),
WNotifyVal(5, DMCS2RdData.haltgroup, DMCS2WrData.haltgroup, haltgroupWrEn,
RegFieldDesc("haltgroup", "halt group", reset=Some(0), volatile=true)),
if (nExtTriggers > 1)
WNotifyVal(4, DMCS2RdData.exttrigger, DMCS2WrData.exttrigger, exttriggerWrEn,
RegFieldDesc("exttrigger", "external trigger select", reset=Some(0), volatile=true))
else RegField(4)
))
val abstractcsRegFields = RegFieldGroup("dmi_abstractcs", Some("abstract command control/status"), Seq(
RegField.r(4, ABSTRACTCSRdData.datacount, RegFieldDesc("datacount", "number of DATA registers", reset=Some(cfg.nAbstractDataWords))),
RegField(4),
WNotifyVal(3, ABSTRACTCSRdData.cmderr, ABSTRACTCSWrData.cmderr, ABSTRACTCSWrEnMaybe,
RegFieldDesc("cmderr", "command error", reset=Some(0), wrType=Some(RegFieldWrType.ONE_TO_CLEAR))),
RegField(1),
RegField.r(1, ABSTRACTCSRdData.busy, RegFieldDesc("busy", "busy", reset=Some(0))),
RegField(11),
RegField.r(5, ABSTRACTCSRdData.progbufsize, RegFieldDesc("progbufsize", "number of PROGBUF registers", reset=Some(cfg.nProgramBufferWords)))
))
val (sbcsFields, sbAddrFields, sbDataFields):
(Seq[RegField], Seq[Seq[RegField]], Seq[Seq[RegField]]) = sb2tlOpt.map{ sb2tl =>
SystemBusAccessModule(sb2tl, io.dmactive, dmAuthenticated)(p)
}.getOrElse((Seq.empty[RegField], Seq.fill[Seq[RegField]](4)(Seq.empty[RegField]), Seq.fill[Seq[RegField]](4)(Seq.empty[RegField])))
//--------------------------------------------------------------
// Program Buffer Access (DMI ... System Bus can override)
//--------------------------------------------------------------
val omRegMap = dmiNode.regmap(
(DMI_DMSTATUS << 2) -> dmstatusRegFields,
//TODO (DMI_CFGSTRADDR0 << 2) -> cfgStrAddrFields,
(DMI_DMCS2 << 2) -> (if (nHaltGroups > 0) dmcs2RegFields else Nil),
(DMI_HALTSUM0 << 2) -> RegFieldGroup("dmi_haltsum0", Some("Halt Summary 0"),
Seq(RegField.r(32, HALTSUM0RdData.asUInt, RegFieldDesc("dmi_haltsum0", "halt summary 0")))),
(DMI_HALTSUM1 << 2) -> RegFieldGroup("dmi_haltsum1", Some("Halt Summary 1"),
Seq(RegField.r(32, HALTSUM1RdData.asUInt, RegFieldDesc("dmi_haltsum1", "halt summary 1")))),
(DMI_ABSTRACTCS << 2) -> abstractcsRegFields,
(DMI_ABSTRACTAUTO<< 2) -> RegFieldGroup("dmi_abstractauto", Some("abstract command autoexec"), Seq(
WNotifyVal(cfg.nAbstractDataWords, ABSTRACTAUTORdData.autoexecdata, ABSTRACTAUTOWrData.autoexecdata, autoexecdataWrEnMaybe,
RegFieldDesc("autoexecdata", "abstract command data autoexec", reset=Some(0))),
RegField(16-cfg.nAbstractDataWords),
WNotifyVal(cfg.nProgramBufferWords, ABSTRACTAUTORdData.autoexecprogbuf, ABSTRACTAUTOWrData.autoexecprogbuf, autoexecprogbufWrEnMaybe,
RegFieldDesc("autoexecprogbuf", "abstract command progbuf autoexec", reset=Some(0))))),
(DMI_COMMAND << 2) -> RegFieldGroup("dmi_command", Some("Abstract Command Register"),
Seq(RWNotify(32, COMMANDRdData.asUInt, COMMANDWrDataVal, COMMANDRdEn, COMMANDWrEnMaybe,
Some(RegFieldDesc("dmi_command", "abstract command register", reset=Some(0), volatile=true))))),
(DMI_DATA0 << 2) -> RegFieldGroup("dmi_data", Some("abstract command data registers"), abstractDataMem.zipWithIndex.map{case (x, i) =>
RWNotify(8, Mux(dmAuthenticated, x, 0.U), abstractDataNxt(i),
dmiAbstractDataRdEn(i),
dmiAbstractDataWrEnMaybe(i),
Some(RegFieldDesc(s"dmi_data_$i", s"abstract command data register $i", reset = Some(0), volatile=true)))}, false),
(DMI_PROGBUF0 << 2) -> RegFieldGroup("dmi_progbuf", Some("abstract command progbuf registers"), programBufferMem.zipWithIndex.map{case (x, i) =>
RWNotify(8, Mux(dmAuthenticated, x, 0.U), programBufferNxt(i),
dmiProgramBufferRdEn(i),
dmiProgramBufferWrEnMaybe(i),
Some(RegFieldDesc(s"dmi_progbuf_$i", s"abstract command progbuf register $i", reset = Some(0))))}, false),
(DMI_AUTHDATA << 2) -> (if (cfg.hasAuthentication) RegFieldGroup("dmi_authdata", Some("authentication data exchange register"),
Seq(RWNotify(32, io.auth.get.dmAuthRdata, io.auth.get.dmAuthWdata, authRdEnMaybe, authWrEnMaybe,
Some(RegFieldDesc("authdata", "authentication data exchange", volatile=true))))) else Nil),
(DMI_SBCS << 2) -> sbcsFields,
(DMI_SBDATA0 << 2) -> sbDataFields(0),
(DMI_SBDATA1 << 2) -> sbDataFields(1),
(DMI_SBDATA2 << 2) -> sbDataFields(2),
(DMI_SBDATA3 << 2) -> sbDataFields(3),
(DMI_SBADDRESS0 << 2) -> sbAddrFields(0),
(DMI_SBADDRESS1 << 2) -> sbAddrFields(1),
(DMI_SBADDRESS2 << 2) -> sbAddrFields(2),
(DMI_SBADDRESS3 << 2) -> sbAddrFields(3)
)
// Abstract data mem is written by both the TileLink interface and the DMI...
abstractDataMem.zipWithIndex.foreach { case (x, i) =>
when (dmAuthenticated && dmiAbstractDataWrEnMaybe(i) && dmiAbstractDataAccessLegal) {
x := abstractDataNxt(i)
}
}
// ... and also by custom register read (if implemented)
val (customs, customParams) = customNode.in.unzip
val needCustom = (customs.size > 0) && (customParams.head.addrs.size > 0)
def getNeedCustom = () => needCustom
if (needCustom) {
val (custom, customP) = customNode.in.head
require(customP.width % 8 == 0, s"Debug Custom width must be divisible by 8, not ${customP.width}")
val custom_data = custom.data.asBools
val custom_bytes = Seq.tabulate(customP.width/8){i => custom_data.slice(i*8, (i+1)*8).asUInt}
when (custom.ready && custom.valid) {
(abstractDataMem zip custom_bytes).zipWithIndex.foreach {case ((a, b), i) =>
a := b
}
}
}
programBufferMem.zipWithIndex.foreach { case (x, i) =>
when (dmAuthenticated && dmiProgramBufferWrEnMaybe(i) && dmiProgramBufferAccessLegal) {
x := programBufferNxt(i)
}
}
//--------------------------------------------------------------
// "Variable" ROM Generation
//--------------------------------------------------------------
val goReg = Reg(Bool())
val goAbstract = WireInit(false.B)
val goCustom = WireInit(false.B)
val jalAbstract = WireInit(Instructions.JAL.value.U.asTypeOf(new GeneratedUJ()))
jalAbstract.setImm(ABSTRACT(cfg) - WHERETO)
when (~io.dmactive){
goReg := false.B
}.otherwise {
when (goAbstract) {
goReg := true.B
}.elsewhen (hartGoingWrEn){
assert(hartGoingId === 0.U, "Unexpected 'GOING' hart.")//Chisel3 #540 %x, expected %x", hartGoingId, 0.U)
goReg := false.B
}
}
class flagBundle extends Bundle {
val reserved = UInt(6.W)
val resume = Bool()
val go = Bool()
}
val flags = WireInit(VecInit(Seq.fill(1 << selectedHartReg.getWidth) {0.U.asTypeOf(new flagBundle())} ))
assert ((hartSelFuncs.hartSelToHartId(selectedHartReg) < flags.size.U),
s"HartSel to HartId Mapping is illegal for this Debug Implementation, because HartID must be < ${flags.size} for it to work.")
flags(hartSelFuncs.hartSelToHartId(selectedHartReg)).go := goReg
for (component <- 0 until nComponents) {
val componentSel = WireInit(component.U)
flags(hartSelFuncs.hartSelToHartId(componentSel)).resume := resumeReqRegs(component)
}
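// Illustrative note: the Debug ROM has each hart poll the flag byte at FLAGS + its hart ID; bit 0
// ("go") sends it to WHERETO and bit 1 ("resume") tells it to leave Debug Mode, which is why the
// flags Vec above is indexed through hartSelToHartId rather than by the raw hart select value.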
//----------------------------
// Abstract Command Decoding & Generation
//----------------------------
val accessRegisterCommandWr = WireInit(COMMANDWrData.asUInt.asTypeOf(new ACCESS_REGISTERFields()))
/** real COMMAND*/
val accessRegisterCommandReg = WireInit(COMMANDReg.asUInt.asTypeOf(new ACCESS_REGISTERFields()))
// TODO: Quick Access
class GeneratedI extends Bundle {
val imm = UInt(12.W)
val rs1 = UInt(5.W)
val funct3 = UInt(3.W)
val rd = UInt(5.W)
val opcode = UInt(7.W)
}
class GeneratedS extends Bundle {
val immhi = UInt(7.W)
val rs2 = UInt(5.W)
val rs1 = UInt(5.W)
val funct3 = UInt(3.W)
val immlo = UInt(5.W)
val opcode = UInt(7.W)
}
class GeneratedCSR extends Bundle {
val imm = UInt(12.W)
val rs1 = UInt(5.W)
val funct3 = UInt(3.W)
val rd = UInt(5.W)
val opcode = UInt(7.W)
}
class GeneratedUJ extends Bundle {
val imm3 = UInt(1.W)
val imm0 = UInt(10.W)
val imm1 = UInt(1.W)
val imm2 = UInt(8.W)
val rd = UInt(5.W)
val opcode = UInt(7.W)
def setImm(imm: Int) : Unit = {
// TODO: Check bounds of imm.
require(imm % 2 == 0, "Immediate must be even for UJ encoding.")
val immWire = WireInit(imm.S(21.W))
val immBits = WireInit(VecInit(immWire.asBools))
imm0 := immBits.slice(1, 1 + 10).asUInt
imm1 := immBits.slice(11, 11 + 11).asUInt
imm2 := immBits.slice(12, 12 + 8).asUInt
imm3 := immBits.slice(20, 20 + 1).asUInt
}
}
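// Minimal pure-Scala sketch (illustrative only, not used by the generated hardware): models the
// UJ-immediate scrambling performed by setImm above, returning the 20-bit field packed into
// inst[31:12] as imm[20|10:1|11|19:12]; it could, for example, be applied to ABSTRACT(cfg) - WHERETO.
def ujImmField(imm: Int): Int = {
require(imm % 2 == 0, "Immediate must be even for UJ encoding.")
(((imm >> 20) & 0x1)   << 19) | // imm[20]    -> inst[31]
(((imm >> 1)  & 0x3FF) << 9)  | // imm[10:1]  -> inst[30:21]
(((imm >> 11) & 0x1)   << 8)  | // imm[11]    -> inst[20]
((imm >> 12) & 0xFF)            // imm[19:12] -> inst[19:12]
}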
require((cfg.atzero && cfg.nAbstractInstructions == 2) || (!cfg.atzero && cfg.nAbstractInstructions == 5),
"Mismatch between DebugModuleParams atzero and nAbstractInstructions")
val abstractGeneratedMem = Reg(Vec(cfg.nAbstractInstructions, (UInt(32.W))))
def abstractGeneratedI(cfg: DebugModuleParams): UInt = {
val inst = Wire(new GeneratedI())
val offset = if (cfg.atzero) DATA else (DATA-0x800) & 0xFFF
val base = if (cfg.atzero) 0.U else Mux(accessRegisterCommandReg.regno(0), 8.U, 9.U)
inst.opcode := (Instructions.LW.value.U.asTypeOf(new GeneratedI())).opcode
inst.rd := (accessRegisterCommandReg.regno & 0x1F.U)
inst.funct3 := accessRegisterCommandReg.size
inst.rs1 := base
inst.imm := offset.U
inst.asUInt
}
def abstractGeneratedS(cfg: DebugModuleParams): UInt = {
val inst = Wire(new GeneratedS())
val offset = if (cfg.atzero) DATA else (DATA-0x800) & 0xFFF
val base = if (cfg.atzero) 0.U else Mux(accessRegisterCommandReg.regno(0), 8.U, 9.U)
inst.opcode := (Instructions.SW.value.U.asTypeOf(new GeneratedS())).opcode
inst.immlo := (offset & 0x1F).U
inst.funct3 := accessRegisterCommandReg.size
inst.rs1 := base
inst.rs2 := (accessRegisterCommandReg.regno & 0x1F.U)
inst.immhi := (offset >> 5).U
inst.asUInt
}
def abstractGeneratedCSR: UInt = {
val inst = Wire(new GeneratedCSR())
val base = Mux(accessRegisterCommandReg.regno(0), 8.U, 9.U) // use s0 as base for odd regs, s1 as base for even regs
inst := (Instructions.CSRRW.value.U.asTypeOf(new GeneratedCSR()))
inst.imm := CSRs.dscratch1.U
inst.rs1 := base
inst.rd := base
inst.asUInt
}
val nop = Wire(new GeneratedI())
nop := Instructions.ADDI.value.U.asTypeOf(new GeneratedI())
nop.rd := 0.U
nop.rs1 := 0.U
nop.imm := 0.U
val isa = Wire(new GeneratedI())
isa := Instructions.ADDIW.value.U.asTypeOf(new GeneratedI())
isa.rd := 0.U
isa.rs1 := 0.U
isa.imm := 0.U
when (goAbstract) {
if (cfg.nAbstractInstructions == 2) {
// ABSTRACT(0): Transfer: LW or SW, else NOP
// ABSTRACT(1): Postexec: NOP else EBREAK
abstractGeneratedMem(0) := Mux(accessRegisterCommandReg.transfer,
Mux(accessRegisterCommandReg.write, abstractGeneratedI(cfg), abstractGeneratedS(cfg)),
nop.asUInt
)
abstractGeneratedMem(1) := Mux(accessRegisterCommandReg.postexec,
nop.asUInt,
Instructions.EBREAK.value.U)
} else {
// Entry: All regs in GPRs, dscratch1=offset 0x800 in DM
// ABSTRACT(0): CheckISA: ADDIW or NOP (exception here if size=3 and not RV64)
// ABSTRACT(1): CSRRW s1,dscratch1,s1 or CSRRW s0,dscratch1,s0
// ABSTRACT(2): Transfer: LW, SW, LD, SD else NOP
// ABSTRACT(3): CSRRW s1,dscratch1,s1 or CSRRW s0,dscratch1,s0
// ABSTRACT(4): Postexec: NOP else EBREAK
abstractGeneratedMem(0) := Mux(accessRegisterCommandReg.transfer && accessRegisterCommandReg.size =/= 2.U, isa.asUInt, nop.asUInt)
abstractGeneratedMem(1) := abstractGeneratedCSR
abstractGeneratedMem(2) := Mux(accessRegisterCommandReg.transfer,
Mux(accessRegisterCommandReg.write, abstractGeneratedI(cfg), abstractGeneratedS(cfg)),
nop.asUInt
)
abstractGeneratedMem(3) := abstractGeneratedCSR
abstractGeneratedMem(4) := Mux(accessRegisterCommandReg.postexec,
nop.asUInt,
Instructions.EBREAK.value.U)
}
}
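// Illustrative example (assuming cfg.atzero = false and an Access Register command that reads
// GPR x9: regno = 0x1009, size = 3, transfer = 1, postexec = 0), the five generated slots are
// roughly:
//   addiw x0, x0, 0          // traps on RV32, flagging the unsupported size=3
//   csrrw s0, dscratch1, s0  // swap in the DM base kept in dscratch1
//   sd    x9, offset(s0)     // store the register into the DATA words
//   csrrw s0, dscratch1, s0  // restore s0
//   ebreak                   // return control to the Debug ROM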
//--------------------------------------------------------------
// Drive Custom Access
//--------------------------------------------------------------
if (needCustom) {
val (custom, customP) = customNode.in.head
custom.addr := accessRegisterCommandReg.regno
custom.valid := goCustom
}
//--------------------------------------------------------------
// Hart Bus Access
//--------------------------------------------------------------
tlNode.regmap(
// This memory is writable.
HALTED -> Seq(WNotifyWire(sbIdWidth, hartHaltedId, hartHaltedWrEn,
"debug_hart_halted", "Debug ROM Causes hart to write its hartID here when it is in Debug Mode.")),
GOING -> Seq(WNotifyWire(sbIdWidth, hartGoingId, hartGoingWrEn,
"debug_hart_going", "Debug ROM causes hart to write 0 here when it begins executing Debug Mode instructions.")),
RESUMING -> Seq(WNotifyWire(sbIdWidth, hartResumingId, hartResumingWrEn,
"debug_hart_resuming", "Debug ROM causes hart to write its hartID here when it leaves Debug Mode.")),
EXCEPTION -> Seq(WNotifyWire(sbIdWidth, hartExceptionId, hartExceptionWrEn,
"debug_hart_exception", "Debug ROM causes hart to write 0 here if it gets an exception in Debug Mode.")),
DATA -> RegFieldGroup("debug_data", Some("Data used to communicate with Debug Module"),
abstractDataMem.zipWithIndex.map {case (x, i) => RegField(8, x, RegFieldDesc(s"debug_data_$i", ""))}),
PROGBUF(cfg)-> RegFieldGroup("debug_progbuf", Some("Program buffer used to communicate with Debug Module"),
programBufferMem.zipWithIndex.map {case (x, i) => RegField(8, x, RegFieldDesc(s"debug_progbuf_$i", ""))}),
// These sections are read-only.
IMPEBREAK(cfg)-> {if (cfg.hasImplicitEbreak) Seq(RegField.r(32, Instructions.EBREAK.value.U,
RegFieldDesc("debug_impebreak", "Debug Implicit EBREAK", reset=Some(Instructions.EBREAK.value)))) else Nil},
WHERETO -> Seq(RegField.r(32, jalAbstract.asUInt, RegFieldDesc("debug_whereto", "Instruction filled in by Debug Module to control hart in Debug Mode", volatile = true))),
ABSTRACT(cfg) -> RegFieldGroup("debug_abstract", Some("Instructions generated by Debug Module"),
abstractGeneratedMem.zipWithIndex.map{ case (x,i) => RegField.r(32, x, RegFieldDesc(s"debug_abstract_$i", "", volatile=true))}),
FLAGS -> RegFieldGroup("debug_flags", Some("Memory region used to control hart going/resuming in Debug Mode"),
if (nComponents == 1) {
Seq.tabulate(1024) { i => RegField.r(8, flags(0).asUInt, RegFieldDesc(s"debug_flags_$i", "", volatile=true)) }
} else {
flags.zipWithIndex.map{case(x, i) => RegField.r(8, x.asUInt, RegFieldDesc(s"debug_flags_$i", "", volatile=true))}
}),
ROMBASE -> RegFieldGroup("debug_rom", Some("Debug ROM"),
(if (cfg.atzero) DebugRomContents() else DebugRomNonzeroContents()).zipWithIndex.map{case (x, i) =>
RegField.r(8, (x & 0xFF).U(8.W), RegFieldDesc(s"debug_rom_$i", "", reset=Some(x)))})
)
// Override System Bus accesses with dmactive reset.
when (~io.dmactive){
abstractDataMem.foreach {x => x := 0.U}
programBufferMem.foreach {x => x := 0.U}
}
//--------------------------------------------------------------
// Abstract Command State Machine
//--------------------------------------------------------------
object CtrlState extends scala.Enumeration {
type CtrlState = Value
val Waiting, CheckGenerate, Exec, Custom = Value
def apply( t : Value) : UInt = {
t.id.U(log2Up(values.size).W)
}
}
import CtrlState._
// This is not an initialization!
val ctrlStateReg = Reg(chiselTypeOf(CtrlState(Waiting)))
val hartHalted = haltedBitRegs(if (nComponents == 1) 0.U(0.W) else selectedHartReg)
val ctrlStateNxt = WireInit(ctrlStateReg)
//------------------------
// DMI Register Control and Status
abstractCommandBusy := (ctrlStateReg =/= CtrlState(Waiting))
ABSTRACTCSWrEnLegal := (ctrlStateReg === CtrlState(Waiting))
COMMANDWrEnLegal := (ctrlStateReg === CtrlState(Waiting))
ABSTRACTAUTOWrEnLegal := (ctrlStateReg === CtrlState(Waiting))
dmiAbstractDataAccessLegal := (ctrlStateReg === CtrlState(Waiting))
dmiProgramBufferAccessLegal := (ctrlStateReg === CtrlState(Waiting))
errorBusy := (ABSTRACTCSWrEnMaybe && ~ABSTRACTCSWrEnLegal) ||
(autoexecdataWrEnMaybe && ~ABSTRACTAUTOWrEnLegal) ||
(autoexecprogbufWrEnMaybe && ~ABSTRACTAUTOWrEnLegal) ||
(COMMANDWrEnMaybe && ~COMMANDWrEnLegal) ||
(dmiAbstractDataAccess && ~dmiAbstractDataAccessLegal) ||
(dmiProgramBufferAccess && ~dmiProgramBufferAccessLegal)
// TODO: Maybe Quick Access
val commandWrIsAccessRegister = (COMMANDWrData.cmdtype === DebugAbstractCommandType.AccessRegister.id.U)
val commandRegIsAccessRegister = (COMMANDReg.cmdtype === DebugAbstractCommandType.AccessRegister.id.U)
val commandWrIsUnsupported = COMMANDWrEn && !commandWrIsAccessRegister
val commandRegIsUnsupported = WireInit(true.B)
val commandRegBadHaltResume = WireInit(false.B)
// We only support abstract commands for GPRs and any custom registers, if specified.
val accessRegIsLegalSize = (accessRegisterCommandReg.size === 2.U) || (accessRegisterCommandReg.size === 3.U)
val accessRegIsGPR = (accessRegisterCommandReg.regno >= 0x1000.U && accessRegisterCommandReg.regno <= 0x101F.U) && accessRegIsLegalSize
val accessRegIsCustom = if (needCustom) {
val (custom, customP) = customNode.in.head
customP.addrs.foldLeft(false.B){
(result, current) => result || (current.U === accessRegisterCommandReg.regno)}
} else false.B
when (commandRegIsAccessRegister) {
when (accessRegIsCustom && accessRegisterCommandReg.transfer && accessRegisterCommandReg.write === false.B) {
commandRegIsUnsupported := false.B
}.elsewhen (!accessRegisterCommandReg.transfer || accessRegIsGPR) {
commandRegIsUnsupported := false.B
commandRegBadHaltResume := ~hartHalted
}
}
val wrAccessRegisterCommand = COMMANDWrEn && commandWrIsAccessRegister && (ABSTRACTCSReg.cmderr === 0.U)
val regAccessRegisterCommand = autoexec && commandRegIsAccessRegister && (ABSTRACTCSReg.cmderr === 0.U)
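// Typical flow (illustrative): a legal COMMAND write while Waiting moves to CheckGenerate, where
// the now-registered command is validated; a supported GPR transfer then enters Exec (goAbstract
// loads abstractGeneratedMem and raises the hart's GO flag), and the machine returns to Waiting
// once the selected hart writes HALTED again with goReg already cleared. Custom-register accesses
// take the Custom state instead and complete on the customNode handshake.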
//------------------------
// Variable ROM STATE MACHINE
// -----------------------
when (ctrlStateReg === CtrlState(Waiting)){
when (wrAccessRegisterCommand || regAccessRegisterCommand) {
ctrlStateNxt := CtrlState(CheckGenerate)
}.elsewhen (commandWrIsUnsupported) { // These checks are really on the command type.
errorUnsupported := true.B
}.elsewhen (autoexec && commandRegIsUnsupported) {
errorUnsupported := true.B
}
}.elsewhen (ctrlStateReg === CtrlState(CheckGenerate)){
// We use this state to ensure that the COMMAND has been
// registered by the time that we need to use it, to avoid
// generating it directly from the COMMANDWrData.
// This 'commandRegIsUnsupported' is really just checking the
// AccessRegisterCommand parameters (regno)
when (commandRegIsUnsupported) {
errorUnsupported := true.B
ctrlStateNxt := CtrlState(Waiting)
}.elsewhen (commandRegBadHaltResume){
errorHaltResume := true.B
ctrlStateNxt := CtrlState(Waiting)
}.otherwise {
when(accessRegIsCustom) {
ctrlStateNxt := CtrlState(Custom)
}.otherwise {
ctrlStateNxt := CtrlState(Exec)
goAbstract := true.B
}
}
}.elsewhen (ctrlStateReg === CtrlState(Exec)) {
// We can't just look at 'hartHalted' here, because
// hartHaltedWrEn is overloaded to mean 'got an ebreak'
// which may have happened when we were already halted.
when(goReg === false.B && hartHaltedWrEn && (hartSelFuncs.hartIdToHartSel(hartHaltedId) === selectedHartReg)){
ctrlStateNxt := CtrlState(Waiting)
}
when(hartExceptionWrEn) {
assert(hartExceptionId === 0.U, "Unexpected 'EXCEPTION' hart")//Chisel3 #540, %x, expected %x", hartExceptionId, 0.U)
ctrlStateNxt := CtrlState(Waiting)
errorException := true.B
}
}.elsewhen (ctrlStateReg === CtrlState(Custom)) {
assert(needCustom.B, "Should not be in custom state unless we need it.")
goCustom := true.B
val (custom, customP) = customNode.in.head
when (custom.ready && custom.valid) {
ctrlStateNxt := CtrlState(Waiting)
}
}
when (~io.dmactive || ~dmAuthenticated) {
ctrlStateReg := CtrlState(Waiting)
}.otherwise {
ctrlStateReg := ctrlStateNxt
}
assert ((!io.dmactive || !hartExceptionWrEn || ctrlStateReg === CtrlState(Exec)),
"Unexpected EXCEPTION write: should only get it in Debug Module EXEC state")
}
}
// Wrapper around TL Debug Module Inner and an Async DMI Sink interface.
// Handles the synchronization of dmactive, which is used as a synchronous reset
// inside the Inner block.
// Also is the Sink side of hartsel & resumereq fields of DMCONTROL.
class TLDebugModuleInnerAsync(device: Device, getNComponents: () => Int, beatBytes: Int)(implicit p: Parameters) extends LazyModule{
val cfg = p(DebugModuleKey).get
val dmInner = LazyModule(new TLDebugModuleInner(device, getNComponents, beatBytes))
val dmiXing = LazyModule(new TLAsyncCrossingSink(AsyncQueueParams.singleton(safe=cfg.crossingHasSafeReset)))
val dmiNode = dmiXing.node
val tlNode = dmInner.tlNode
dmInner.dmiNode := dmiXing.node
// Require that there are no registers in TL interface, so that spurious
// processor accesses to the DM don't need to enable the clock. We don't
// require this property of the SBA, because the debugger is responsible for
// raising dmactive (hence enabling the clock) during these transactions.
require(dmInner.tlNode.concurrency == 0)
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
// Clock/reset domains:
// debug_clock / debug_reset = Debug inner domain
// tl_clock / tl_reset = tilelink domain (External: clock / reset)
//
val io = IO(new Bundle {
val debug_clock = Input(Clock())
val debug_reset = Input(Reset())
val tl_clock = Input(Clock())
val tl_reset = Input(Reset())
// These are all asynchronous and come from Outer
/** reset signal for DM */
val dmactive = Input(Bool())
/** control signals for Inner
*
* generated in Outer
*/
val innerCtrl = Flipped(new AsyncBundle(new DebugInternalBundle(getNComponents()), AsyncQueueParams.singleton(safe=cfg.crossingHasSafeReset)))
// This comes from tlClk domain.
/** debug unavailable status */
val debugUnavail = Input(Vec(getNComponents(), Bool()))
/** debug interruption*/
val hgDebugInt = Output(Vec(getNComponents(), Bool()))
val extTrigger = (p(DebugModuleKey).get.nExtTriggers > 0).option(new DebugExtTriggerIO())
/** vector to indicate which hart is in reset
*
* the DM receives it from the core and forwards it to Inner
*/
val hartIsInReset = Input(Vec(getNComponents(), Bool()))
/** Debug Authentication signals from core */
val auth = p(DebugModuleKey).get.hasAuthentication.option(new DebugAuthenticationIO())
})
val rf_reset = IO(Input(Reset())) // RF transform
childClock := io.debug_clock
childReset := io.debug_reset
override def provideImplicitClockToLazyChildren = true
val dmactive_synced = withClockAndReset(childClock, childReset) {
val dmactive_synced = AsyncResetSynchronizerShiftReg(in=io.dmactive, sync=3, name=Some("dmactiveSync"))
dmInner.module.clock := io.debug_clock
dmInner.module.reset := io.debug_reset
dmInner.module.io.tl_clock := io.tl_clock
dmInner.module.io.tl_reset := io.tl_reset
dmInner.module.io.dmactive := dmactive_synced
dmInner.module.io.innerCtrl <> FromAsyncBundle(io.innerCtrl)
dmInner.module.io.debugUnavail := io.debugUnavail
io.hgDebugInt := dmInner.module.io.hgDebugInt
io.extTrigger.foreach { x => dmInner.module.io.extTrigger.foreach {y => x <> y}}
dmInner.module.io.hartIsInReset := io.hartIsInReset
io.auth.foreach { x => dmInner.module.io.auth.foreach {y => x <> y}}
dmactive_synced
}
}
}
/** Create a version of the TLDebugModule which includes a synchronization interface
* internally for the DMI. This is no longer optional outside of this module
* because the Clock must run when tl_clock isn't running or tl_reset is asserted.
*/
class TLDebugModule(beatBytes: Int)(implicit p: Parameters) extends LazyModule {
val device = new SimpleDevice("debug-controller", Seq("sifive,debug-013","riscv,debug-013")){
override val alwaysExtended = true
override def describe(resources: ResourceBindings): Description = {
val Description(name, mapping) = super.describe(resources)
val attach = Map(
"debug-attach" -> (
(if (p(ExportDebug).apb) Seq(ResourceString("apb")) else Seq()) ++
(if (p(ExportDebug).jtag) Seq(ResourceString("jtag")) else Seq()) ++
(if (p(ExportDebug).cjtag) Seq(ResourceString("cjtag")) else Seq()) ++
(if (p(ExportDebug).dmi) Seq(ResourceString("dmi")) else Seq())))
Description(name, mapping ++ attach)
}
}
val dmOuter : TLDebugModuleOuterAsync = LazyModule(new TLDebugModuleOuterAsync(device)(p))
val dmInner : TLDebugModuleInnerAsync = LazyModule(new TLDebugModuleInnerAsync(device, () => {dmOuter.dmOuter.intnode.edges.out.size}, beatBytes)(p))
val node = dmInner.tlNode
val intnode = dmOuter.intnode
val apbNodeOpt = dmOuter.apbNodeOpt
dmInner.dmiNode := dmOuter.dmiInnerNode
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val nComponents = dmOuter.dmOuter.intnode.edges.out.size
// Clock/reset domains:
// tl_clock / tl_reset = tilelink domain
// debug_clock / debug_reset = Inner debug (synchronous to tl_clock)
// apb_clock / apb_reset = Outer debug with APB
// dmiClock / dmiReset = Outer debug without APB
//
val io = IO(new Bundle {
val debug_clock = Input(Clock())
val debug_reset = Input(Reset())
val tl_clock = Input(Clock())
val tl_reset = Input(Reset())
/** Debug control signals generated in Outer */
val ctrl = new DebugCtrlBundle(nComponents)
/** Debug Module Interface between DM and DTM
*
* The DTM provides access to one or more Debug Modules (DMs) using DMI
*/
val dmi = (!p(ExportDebug).apb).option(Flipped(new ClockedDMIIO()))
val apb_clock = p(ExportDebug).apb.option(Input(Clock()))
val apb_reset = p(ExportDebug).apb.option(Input(Reset()))
val extTrigger = (p(DebugModuleKey).get.nExtTriggers > 0).option(new DebugExtTriggerIO())
/** vector to indicate which hart is in reset
*
* the DM receives it from the core and forwards it to Inner
*/
val hartIsInReset = Input(Vec(nComponents, Bool()))
/** hart reset request generated by hartreset-logic in Outer */
val hartResetReq = p(DebugModuleKey).get.hasHartResets.option(Output(Vec(nComponents, Bool())))
/** Debug Authentication signals from core */
val auth = p(DebugModuleKey).get.hasAuthentication.option(new DebugAuthenticationIO())
})
childClock := io.tl_clock
childReset := io.tl_reset
override def provideImplicitClockToLazyChildren = true
dmOuter.module.io.dmi.foreach { dmOuterDMI =>
dmOuterDMI <> io.dmi.get.dmi
dmOuter.module.io.dmi_reset := io.dmi.get.dmiReset
dmOuter.module.io.dmi_clock := io.dmi.get.dmiClock
dmOuter.module.rf_reset := io.dmi.get.dmiReset
}
(io.apb_clock zip io.apb_reset) foreach { case (c, r) =>
dmOuter.module.io.dmi_reset := r
dmOuter.module.io.dmi_clock := c
dmOuter.module.rf_reset := r
}
dmInner.module.rf_reset := io.debug_reset
dmInner.module.io.debug_clock := io.debug_clock
dmInner.module.io.debug_reset := io.debug_reset
dmInner.module.io.tl_clock := io.tl_clock
dmInner.module.io.tl_reset := io.tl_reset
dmInner.module.io.innerCtrl <> dmOuter.module.io.innerCtrl
dmInner.module.io.dmactive := dmOuter.module.io.ctrl.dmactive
dmInner.module.io.debugUnavail := io.ctrl.debugUnavail
dmOuter.module.io.hgDebugInt := dmInner.module.io.hgDebugInt
io.ctrl <> dmOuter.module.io.ctrl
io.extTrigger.foreach { x => dmInner.module.io.extTrigger.foreach {y => x <> y}}
dmInner.module.io.hartIsInReset := io.hartIsInReset
io.hartResetReq.foreach { x => dmOuter.module.io.hartResetReq.foreach {y => x := y}}
io.auth.foreach { x => dmOuter.module.io.dmAuthenticated.get := x.dmAuthenticated }
io.auth.foreach { x => dmInner.module.io.auth.foreach {y => x <> y}}
}
}
| module TLDebugModuleOuterAsync( // @[Debug.scala:709:9]
output [2:0] auto_asource_out_a_mem_0_opcode, // @[LazyModuleImp.scala:107:25]
output [8:0] auto_asource_out_a_mem_0_address, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_asource_out_a_mem_0_data, // @[LazyModuleImp.scala:107:25]
input auto_asource_out_a_ridx, // @[LazyModuleImp.scala:107:25]
output auto_asource_out_a_widx, // @[LazyModuleImp.scala:107:25]
input auto_asource_out_a_safe_ridx_valid, // @[LazyModuleImp.scala:107:25]
output auto_asource_out_a_safe_widx_valid, // @[LazyModuleImp.scala:107:25]
output auto_asource_out_a_safe_source_reset_n, // @[LazyModuleImp.scala:107:25]
input auto_asource_out_a_safe_sink_reset_n, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_asource_out_d_mem_0_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_asource_out_d_mem_0_size, // @[LazyModuleImp.scala:107:25]
input auto_asource_out_d_mem_0_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_asource_out_d_mem_0_data, // @[LazyModuleImp.scala:107:25]
output auto_asource_out_d_ridx, // @[LazyModuleImp.scala:107:25]
input auto_asource_out_d_widx, // @[LazyModuleImp.scala:107:25]
output auto_asource_out_d_safe_ridx_valid, // @[LazyModuleImp.scala:107:25]
input auto_asource_out_d_safe_widx_valid, // @[LazyModuleImp.scala:107:25]
input auto_asource_out_d_safe_source_reset_n, // @[LazyModuleImp.scala:107:25]
output auto_asource_out_d_safe_sink_reset_n, // @[LazyModuleImp.scala:107:25]
output auto_int_out_sync_0, // @[LazyModuleImp.scala:107:25]
input io_dmi_clock, // @[Debug.scala:713:16]
input io_dmi_reset, // @[Debug.scala:713:16]
output io_dmi_req_ready, // @[Debug.scala:713:16]
input io_dmi_req_valid, // @[Debug.scala:713:16]
input [6:0] io_dmi_req_bits_addr, // @[Debug.scala:713:16]
input [31:0] io_dmi_req_bits_data, // @[Debug.scala:713:16]
input [1:0] io_dmi_req_bits_op, // @[Debug.scala:713:16]
input io_dmi_resp_ready, // @[Debug.scala:713:16]
output io_dmi_resp_valid, // @[Debug.scala:713:16]
output [31:0] io_dmi_resp_bits_data, // @[Debug.scala:713:16]
output [1:0] io_dmi_resp_bits_resp, // @[Debug.scala:713:16]
output io_ctrl_dmactive, // @[Debug.scala:713:16]
input io_ctrl_dmactiveAck, // @[Debug.scala:713:16]
output io_innerCtrl_mem_0_resumereq, // @[Debug.scala:713:16]
output [9:0] io_innerCtrl_mem_0_hartsel, // @[Debug.scala:713:16]
output io_innerCtrl_mem_0_ackhavereset, // @[Debug.scala:713:16]
output io_innerCtrl_mem_0_hrmask_0, // @[Debug.scala:713:16]
input io_innerCtrl_ridx, // @[Debug.scala:713:16]
output io_innerCtrl_widx, // @[Debug.scala:713:16]
input io_innerCtrl_safe_ridx_valid, // @[Debug.scala:713:16]
output io_innerCtrl_safe_widx_valid, // @[Debug.scala:713:16]
output io_innerCtrl_safe_source_reset_n, // @[Debug.scala:713:16]
input io_innerCtrl_safe_sink_reset_n, // @[Debug.scala:713:16]
input io_hgDebugInt_0 // @[Debug.scala:713:16]
);
wire _io_innerCtrl_source_io_enq_ready; // @[AsyncQueue.scala:220:24]
wire _dmactiveAck_dmactiveAckSync_io_q; // @[ShiftReg.scala:45:23]
wire _asource_auto_in_a_ready; // @[AsyncCrossing.scala:94:29]
wire _asource_auto_in_d_valid; // @[AsyncCrossing.scala:94:29]
wire [2:0] _asource_auto_in_d_bits_opcode; // @[AsyncCrossing.scala:94:29]
wire [1:0] _asource_auto_in_d_bits_param; // @[AsyncCrossing.scala:94:29]
wire [1:0] _asource_auto_in_d_bits_size; // @[AsyncCrossing.scala:94:29]
wire _asource_auto_in_d_bits_source; // @[AsyncCrossing.scala:94:29]
wire _asource_auto_in_d_bits_sink; // @[AsyncCrossing.scala:94:29]
wire _asource_auto_in_d_bits_denied; // @[AsyncCrossing.scala:94:29]
wire [31:0] _asource_auto_in_d_bits_data; // @[AsyncCrossing.scala:94:29]
wire _asource_auto_in_d_bits_corrupt; // @[AsyncCrossing.scala:94:29]
wire _dmiBypass_auto_node_out_out_a_valid; // @[Debug.scala:704:29]
wire [2:0] _dmiBypass_auto_node_out_out_a_bits_opcode; // @[Debug.scala:704:29]
wire [8:0] _dmiBypass_auto_node_out_out_a_bits_address; // @[Debug.scala:704:29]
wire [31:0] _dmiBypass_auto_node_out_out_a_bits_data; // @[Debug.scala:704:29]
wire _dmiBypass_auto_node_out_out_d_ready; // @[Debug.scala:704:29]
wire _dmiBypass_auto_node_in_in_a_ready; // @[Debug.scala:704:29]
wire _dmiBypass_auto_node_in_in_d_valid; // @[Debug.scala:704:29]
wire [2:0] _dmiBypass_auto_node_in_in_d_bits_opcode; // @[Debug.scala:704:29]
wire [1:0] _dmiBypass_auto_node_in_in_d_bits_param; // @[Debug.scala:704:29]
wire [1:0] _dmiBypass_auto_node_in_in_d_bits_size; // @[Debug.scala:704:29]
wire _dmiBypass_auto_node_in_in_d_bits_sink; // @[Debug.scala:704:29]
wire _dmiBypass_auto_node_in_in_d_bits_denied; // @[Debug.scala:704:29]
wire [31:0] _dmiBypass_auto_node_in_in_d_bits_data; // @[Debug.scala:704:29]
wire _dmiBypass_auto_node_in_in_d_bits_corrupt; // @[Debug.scala:704:29]
wire _dmOuter_auto_dmi_in_a_ready; // @[Debug.scala:700:27]
wire _dmOuter_auto_dmi_in_d_valid; // @[Debug.scala:700:27]
wire [2:0] _dmOuter_auto_dmi_in_d_bits_opcode; // @[Debug.scala:700:27]
wire [31:0] _dmOuter_auto_dmi_in_d_bits_data; // @[Debug.scala:700:27]
wire _dmOuter_auto_int_out_0; // @[Debug.scala:700:27]
wire _dmOuter_io_ctrl_dmactive; // @[Debug.scala:700:27]
wire _dmOuter_io_innerCtrl_valid; // @[Debug.scala:700:27]
wire _dmOuter_io_innerCtrl_bits_resumereq; // @[Debug.scala:700:27]
wire [9:0] _dmOuter_io_innerCtrl_bits_hartsel; // @[Debug.scala:700:27]
wire _dmOuter_io_innerCtrl_bits_ackhavereset; // @[Debug.scala:700:27]
wire _dmOuter_io_innerCtrl_bits_hrmask_0; // @[Debug.scala:700:27]
wire _dmi2tl_auto_out_a_valid; // @[Debug.scala:678:28]
wire [2:0] _dmi2tl_auto_out_a_bits_opcode; // @[Debug.scala:678:28]
wire [8:0] _dmi2tl_auto_out_a_bits_address; // @[Debug.scala:678:28]
wire [31:0] _dmi2tl_auto_out_a_bits_data; // @[Debug.scala:678:28]
wire _dmi2tl_auto_out_d_ready; // @[Debug.scala:678:28]
wire _dmiXbar_auto_anon_in_a_ready; // @[Debug.scala:675:28]
wire _dmiXbar_auto_anon_in_d_valid; // @[Debug.scala:675:28]
wire _dmiXbar_auto_anon_in_d_bits_denied; // @[Debug.scala:675:28]
wire [31:0] _dmiXbar_auto_anon_in_d_bits_data; // @[Debug.scala:675:28]
wire _dmiXbar_auto_anon_in_d_bits_corrupt; // @[Debug.scala:675:28]
wire _dmiXbar_auto_anon_out_1_a_valid; // @[Debug.scala:675:28]
wire [2:0] _dmiXbar_auto_anon_out_1_a_bits_opcode; // @[Debug.scala:675:28]
wire [6:0] _dmiXbar_auto_anon_out_1_a_bits_address; // @[Debug.scala:675:28]
wire [31:0] _dmiXbar_auto_anon_out_1_a_bits_data; // @[Debug.scala:675:28]
wire _dmiXbar_auto_anon_out_1_d_ready; // @[Debug.scala:675:28]
wire _dmiXbar_auto_anon_out_0_a_valid; // @[Debug.scala:675:28]
wire [2:0] _dmiXbar_auto_anon_out_0_a_bits_opcode; // @[Debug.scala:675:28]
wire [8:0] _dmiXbar_auto_anon_out_0_a_bits_address; // @[Debug.scala:675:28]
wire [31:0] _dmiXbar_auto_anon_out_0_a_bits_data; // @[Debug.scala:675:28]
wire _dmiXbar_auto_anon_out_0_d_ready; // @[Debug.scala:675:28]
TLXbar_dmixbar_i1_o2_a9d32s1k1z2u dmiXbar ( // @[Debug.scala:675:28]
.clock (io_dmi_clock),
.reset (io_dmi_reset),
.auto_anon_in_a_ready (_dmiXbar_auto_anon_in_a_ready),
.auto_anon_in_a_valid (_dmi2tl_auto_out_a_valid), // @[Debug.scala:678:28]
.auto_anon_in_a_bits_opcode (_dmi2tl_auto_out_a_bits_opcode), // @[Debug.scala:678:28]
.auto_anon_in_a_bits_address (_dmi2tl_auto_out_a_bits_address), // @[Debug.scala:678:28]
.auto_anon_in_a_bits_data (_dmi2tl_auto_out_a_bits_data), // @[Debug.scala:678:28]
.auto_anon_in_d_ready (_dmi2tl_auto_out_d_ready), // @[Debug.scala:678:28]
.auto_anon_in_d_valid (_dmiXbar_auto_anon_in_d_valid),
.auto_anon_in_d_bits_denied (_dmiXbar_auto_anon_in_d_bits_denied),
.auto_anon_in_d_bits_data (_dmiXbar_auto_anon_in_d_bits_data),
.auto_anon_in_d_bits_corrupt (_dmiXbar_auto_anon_in_d_bits_corrupt),
.auto_anon_out_1_a_ready (_dmOuter_auto_dmi_in_a_ready), // @[Debug.scala:700:27]
.auto_anon_out_1_a_valid (_dmiXbar_auto_anon_out_1_a_valid),
.auto_anon_out_1_a_bits_opcode (_dmiXbar_auto_anon_out_1_a_bits_opcode),
.auto_anon_out_1_a_bits_address (_dmiXbar_auto_anon_out_1_a_bits_address),
.auto_anon_out_1_a_bits_data (_dmiXbar_auto_anon_out_1_a_bits_data),
.auto_anon_out_1_d_ready (_dmiXbar_auto_anon_out_1_d_ready),
.auto_anon_out_1_d_valid (_dmOuter_auto_dmi_in_d_valid), // @[Debug.scala:700:27]
.auto_anon_out_1_d_bits_opcode (_dmOuter_auto_dmi_in_d_bits_opcode), // @[Debug.scala:700:27]
.auto_anon_out_1_d_bits_data (_dmOuter_auto_dmi_in_d_bits_data), // @[Debug.scala:700:27]
.auto_anon_out_0_a_ready (_dmiBypass_auto_node_in_in_a_ready), // @[Debug.scala:704:29]
.auto_anon_out_0_a_valid (_dmiXbar_auto_anon_out_0_a_valid),
.auto_anon_out_0_a_bits_opcode (_dmiXbar_auto_anon_out_0_a_bits_opcode),
.auto_anon_out_0_a_bits_address (_dmiXbar_auto_anon_out_0_a_bits_address),
.auto_anon_out_0_a_bits_data (_dmiXbar_auto_anon_out_0_a_bits_data),
.auto_anon_out_0_d_ready (_dmiXbar_auto_anon_out_0_d_ready),
.auto_anon_out_0_d_valid (_dmiBypass_auto_node_in_in_d_valid), // @[Debug.scala:704:29]
.auto_anon_out_0_d_bits_opcode (_dmiBypass_auto_node_in_in_d_bits_opcode), // @[Debug.scala:704:29]
.auto_anon_out_0_d_bits_param (_dmiBypass_auto_node_in_in_d_bits_param), // @[Debug.scala:704:29]
.auto_anon_out_0_d_bits_size (_dmiBypass_auto_node_in_in_d_bits_size), // @[Debug.scala:704:29]
.auto_anon_out_0_d_bits_sink (_dmiBypass_auto_node_in_in_d_bits_sink), // @[Debug.scala:704:29]
.auto_anon_out_0_d_bits_denied (_dmiBypass_auto_node_in_in_d_bits_denied), // @[Debug.scala:704:29]
.auto_anon_out_0_d_bits_data (_dmiBypass_auto_node_in_in_d_bits_data), // @[Debug.scala:704:29]
.auto_anon_out_0_d_bits_corrupt (_dmiBypass_auto_node_in_in_d_bits_corrupt) // @[Debug.scala:704:29]
); // @[Debug.scala:675:28]
DMIToTL dmi2tl ( // @[Debug.scala:678:28]
.auto_out_a_ready (_dmiXbar_auto_anon_in_a_ready), // @[Debug.scala:675:28]
.auto_out_a_valid (_dmi2tl_auto_out_a_valid),
.auto_out_a_bits_opcode (_dmi2tl_auto_out_a_bits_opcode),
.auto_out_a_bits_address (_dmi2tl_auto_out_a_bits_address),
.auto_out_a_bits_data (_dmi2tl_auto_out_a_bits_data),
.auto_out_d_ready (_dmi2tl_auto_out_d_ready),
.auto_out_d_valid (_dmiXbar_auto_anon_in_d_valid), // @[Debug.scala:675:28]
.auto_out_d_bits_denied (_dmiXbar_auto_anon_in_d_bits_denied), // @[Debug.scala:675:28]
.auto_out_d_bits_data (_dmiXbar_auto_anon_in_d_bits_data), // @[Debug.scala:675:28]
.auto_out_d_bits_corrupt (_dmiXbar_auto_anon_in_d_bits_corrupt), // @[Debug.scala:675:28]
.io_dmi_req_ready (io_dmi_req_ready),
.io_dmi_req_valid (io_dmi_req_valid),
.io_dmi_req_bits_addr (io_dmi_req_bits_addr),
.io_dmi_req_bits_data (io_dmi_req_bits_data),
.io_dmi_req_bits_op (io_dmi_req_bits_op),
.io_dmi_resp_ready (io_dmi_resp_ready),
.io_dmi_resp_valid (io_dmi_resp_valid),
.io_dmi_resp_bits_data (io_dmi_resp_bits_data),
.io_dmi_resp_bits_resp (io_dmi_resp_bits_resp)
); // @[Debug.scala:678:28]
TLDebugModuleOuter dmOuter ( // @[Debug.scala:700:27]
.clock (io_dmi_clock),
.reset (io_dmi_reset),
.auto_dmi_in_a_ready (_dmOuter_auto_dmi_in_a_ready),
.auto_dmi_in_a_valid (_dmiXbar_auto_anon_out_1_a_valid), // @[Debug.scala:675:28]
.auto_dmi_in_a_bits_opcode (_dmiXbar_auto_anon_out_1_a_bits_opcode), // @[Debug.scala:675:28]
.auto_dmi_in_a_bits_address (_dmiXbar_auto_anon_out_1_a_bits_address), // @[Debug.scala:675:28]
.auto_dmi_in_a_bits_data (_dmiXbar_auto_anon_out_1_a_bits_data), // @[Debug.scala:675:28]
.auto_dmi_in_d_ready (_dmiXbar_auto_anon_out_1_d_ready), // @[Debug.scala:675:28]
.auto_dmi_in_d_valid (_dmOuter_auto_dmi_in_d_valid),
.auto_dmi_in_d_bits_opcode (_dmOuter_auto_dmi_in_d_bits_opcode),
.auto_dmi_in_d_bits_data (_dmOuter_auto_dmi_in_d_bits_data),
.auto_int_out_0 (_dmOuter_auto_int_out_0),
.io_ctrl_dmactive (_dmOuter_io_ctrl_dmactive),
.io_ctrl_dmactiveAck (_dmactiveAck_dmactiveAckSync_io_q), // @[ShiftReg.scala:45:23]
.io_innerCtrl_ready (_io_innerCtrl_source_io_enq_ready), // @[AsyncQueue.scala:220:24]
.io_innerCtrl_valid (_dmOuter_io_innerCtrl_valid),
.io_innerCtrl_bits_resumereq (_dmOuter_io_innerCtrl_bits_resumereq),
.io_innerCtrl_bits_hartsel (_dmOuter_io_innerCtrl_bits_hartsel),
.io_innerCtrl_bits_ackhavereset (_dmOuter_io_innerCtrl_bits_ackhavereset),
.io_innerCtrl_bits_hrmask_0 (_dmOuter_io_innerCtrl_bits_hrmask_0),
.io_hgDebugInt_0 (io_hgDebugInt_0)
); // @[Debug.scala:700:27]
IntSyncCrossingSource_n1x1_Registered intsource ( // @[Crossing.scala:29:31]
.auto_in_0 (_dmOuter_auto_int_out_0), // @[Debug.scala:700:27]
.auto_out_sync_0 (auto_int_out_sync_0)
); // @[Crossing.scala:29:31]
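  // The DMI-side bus is diverted around the debug module whenever dmactive is
  // deasserted or the synchronized dmactiveAck has not yet returned; see the
  // io_bypass expression driven below.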
TLBusBypass dmiBypass ( // @[Debug.scala:704:29]
.clock (io_dmi_clock),
.reset (io_dmi_reset),
.auto_node_out_out_a_ready (_asource_auto_in_a_ready), // @[AsyncCrossing.scala:94:29]
.auto_node_out_out_a_valid (_dmiBypass_auto_node_out_out_a_valid),
.auto_node_out_out_a_bits_opcode (_dmiBypass_auto_node_out_out_a_bits_opcode),
.auto_node_out_out_a_bits_address (_dmiBypass_auto_node_out_out_a_bits_address),
.auto_node_out_out_a_bits_data (_dmiBypass_auto_node_out_out_a_bits_data),
.auto_node_out_out_d_ready (_dmiBypass_auto_node_out_out_d_ready),
.auto_node_out_out_d_valid (_asource_auto_in_d_valid), // @[AsyncCrossing.scala:94:29]
.auto_node_out_out_d_bits_opcode (_asource_auto_in_d_bits_opcode), // @[AsyncCrossing.scala:94:29]
.auto_node_out_out_d_bits_param (_asource_auto_in_d_bits_param), // @[AsyncCrossing.scala:94:29]
.auto_node_out_out_d_bits_size (_asource_auto_in_d_bits_size), // @[AsyncCrossing.scala:94:29]
.auto_node_out_out_d_bits_source (_asource_auto_in_d_bits_source), // @[AsyncCrossing.scala:94:29]
.auto_node_out_out_d_bits_sink (_asource_auto_in_d_bits_sink), // @[AsyncCrossing.scala:94:29]
.auto_node_out_out_d_bits_denied (_asource_auto_in_d_bits_denied), // @[AsyncCrossing.scala:94:29]
.auto_node_out_out_d_bits_data (_asource_auto_in_d_bits_data), // @[AsyncCrossing.scala:94:29]
.auto_node_out_out_d_bits_corrupt (_asource_auto_in_d_bits_corrupt), // @[AsyncCrossing.scala:94:29]
.auto_node_in_in_a_ready (_dmiBypass_auto_node_in_in_a_ready),
.auto_node_in_in_a_valid (_dmiXbar_auto_anon_out_0_a_valid), // @[Debug.scala:675:28]
.auto_node_in_in_a_bits_opcode (_dmiXbar_auto_anon_out_0_a_bits_opcode), // @[Debug.scala:675:28]
.auto_node_in_in_a_bits_address (_dmiXbar_auto_anon_out_0_a_bits_address), // @[Debug.scala:675:28]
.auto_node_in_in_a_bits_data (_dmiXbar_auto_anon_out_0_a_bits_data), // @[Debug.scala:675:28]
.auto_node_in_in_d_ready (_dmiXbar_auto_anon_out_0_d_ready), // @[Debug.scala:675:28]
.auto_node_in_in_d_valid (_dmiBypass_auto_node_in_in_d_valid),
.auto_node_in_in_d_bits_opcode (_dmiBypass_auto_node_in_in_d_bits_opcode),
.auto_node_in_in_d_bits_param (_dmiBypass_auto_node_in_in_d_bits_param),
.auto_node_in_in_d_bits_size (_dmiBypass_auto_node_in_in_d_bits_size),
.auto_node_in_in_d_bits_sink (_dmiBypass_auto_node_in_in_d_bits_sink),
.auto_node_in_in_d_bits_denied (_dmiBypass_auto_node_in_in_d_bits_denied),
.auto_node_in_in_d_bits_data (_dmiBypass_auto_node_in_in_d_bits_data),
.auto_node_in_in_d_bits_corrupt (_dmiBypass_auto_node_in_in_d_bits_corrupt),
.io_bypass (~_dmOuter_io_ctrl_dmactive | ~_dmactiveAck_dmactiveAckSync_io_q) // @[ShiftReg.scala:45:23]
); // @[Debug.scala:704:29]
TLAsyncCrossingSource_a9d32s1k1z2u asource ( // @[AsyncCrossing.scala:94:29]
.clock (io_dmi_clock),
.reset (io_dmi_reset),
.auto_in_a_ready (_asource_auto_in_a_ready),
.auto_in_a_valid (_dmiBypass_auto_node_out_out_a_valid), // @[Debug.scala:704:29]
.auto_in_a_bits_opcode (_dmiBypass_auto_node_out_out_a_bits_opcode), // @[Debug.scala:704:29]
.auto_in_a_bits_address (_dmiBypass_auto_node_out_out_a_bits_address), // @[Debug.scala:704:29]
.auto_in_a_bits_data (_dmiBypass_auto_node_out_out_a_bits_data), // @[Debug.scala:704:29]
.auto_in_d_ready (_dmiBypass_auto_node_out_out_d_ready), // @[Debug.scala:704:29]
.auto_in_d_valid (_asource_auto_in_d_valid),
.auto_in_d_bits_opcode (_asource_auto_in_d_bits_opcode),
.auto_in_d_bits_param (_asource_auto_in_d_bits_param),
.auto_in_d_bits_size (_asource_auto_in_d_bits_size),
.auto_in_d_bits_source (_asource_auto_in_d_bits_source),
.auto_in_d_bits_sink (_asource_auto_in_d_bits_sink),
.auto_in_d_bits_denied (_asource_auto_in_d_bits_denied),
.auto_in_d_bits_data (_asource_auto_in_d_bits_data),
.auto_in_d_bits_corrupt (_asource_auto_in_d_bits_corrupt),
.auto_out_a_mem_0_opcode (auto_asource_out_a_mem_0_opcode),
.auto_out_a_mem_0_address (auto_asource_out_a_mem_0_address),
.auto_out_a_mem_0_data (auto_asource_out_a_mem_0_data),
.auto_out_a_ridx (auto_asource_out_a_ridx),
.auto_out_a_widx (auto_asource_out_a_widx),
.auto_out_a_safe_ridx_valid (auto_asource_out_a_safe_ridx_valid),
.auto_out_a_safe_widx_valid (auto_asource_out_a_safe_widx_valid),
.auto_out_a_safe_source_reset_n (auto_asource_out_a_safe_source_reset_n),
.auto_out_a_safe_sink_reset_n (auto_asource_out_a_safe_sink_reset_n),
.auto_out_d_mem_0_opcode (auto_asource_out_d_mem_0_opcode),
.auto_out_d_mem_0_size (auto_asource_out_d_mem_0_size),
.auto_out_d_mem_0_source (auto_asource_out_d_mem_0_source),
.auto_out_d_mem_0_data (auto_asource_out_d_mem_0_data),
.auto_out_d_ridx (auto_asource_out_d_ridx),
.auto_out_d_widx (auto_asource_out_d_widx),
.auto_out_d_safe_ridx_valid (auto_asource_out_d_safe_ridx_valid),
.auto_out_d_safe_widx_valid (auto_asource_out_d_safe_widx_valid),
.auto_out_d_safe_source_reset_n (auto_asource_out_d_safe_source_reset_n),
.auto_out_d_safe_sink_reset_n (auto_asource_out_d_safe_sink_reset_n)
); // @[AsyncCrossing.scala:94:29]
AsyncResetSynchronizerShiftReg_w1_d3_i0 dmactiveAck_dmactiveAckSync ( // @[ShiftReg.scala:45:23]
.clock (io_dmi_clock),
.reset (io_dmi_reset),
.io_d (io_ctrl_dmactiveAck),
.io_q (_dmactiveAck_dmactiveAckSync_io_q)
); // @[ShiftReg.scala:45:23]
AsyncQueueSource_DebugInternalBundle io_innerCtrl_source ( // @[AsyncQueue.scala:220:24]
.clock (io_dmi_clock),
.reset (io_dmi_reset),
.io_enq_ready (_io_innerCtrl_source_io_enq_ready),
.io_enq_valid (_dmOuter_io_innerCtrl_valid), // @[Debug.scala:700:27]
.io_enq_bits_resumereq (_dmOuter_io_innerCtrl_bits_resumereq), // @[Debug.scala:700:27]
.io_enq_bits_hartsel (_dmOuter_io_innerCtrl_bits_hartsel), // @[Debug.scala:700:27]
.io_enq_bits_ackhavereset (_dmOuter_io_innerCtrl_bits_ackhavereset), // @[Debug.scala:700:27]
.io_enq_bits_hrmask_0 (_dmOuter_io_innerCtrl_bits_hrmask_0), // @[Debug.scala:700:27]
.io_async_mem_0_resumereq (io_innerCtrl_mem_0_resumereq),
.io_async_mem_0_hartsel (io_innerCtrl_mem_0_hartsel),
.io_async_mem_0_ackhavereset (io_innerCtrl_mem_0_ackhavereset),
.io_async_mem_0_hrmask_0 (io_innerCtrl_mem_0_hrmask_0),
.io_async_ridx (io_innerCtrl_ridx),
.io_async_widx (io_innerCtrl_widx),
.io_async_safe_ridx_valid (io_innerCtrl_safe_ridx_valid),
.io_async_safe_widx_valid (io_innerCtrl_safe_widx_valid),
.io_async_safe_source_reset_n (io_innerCtrl_safe_source_reset_n),
.io_async_safe_sink_reset_n (io_innerCtrl_safe_sink_reset_n)
); // @[AsyncQueue.scala:220:24]
assign io_ctrl_dmactive = _dmOuter_io_ctrl_dmactive; // @[Debug.scala:700:27, :709:9]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File primitives.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object lowMask
{
def apply(in: UInt, topBound: BigInt, bottomBound: BigInt): UInt =
{
require(topBound != bottomBound)
val numInVals = BigInt(1)<<in.getWidth
if (topBound < bottomBound) {
lowMask(~in, numInVals - 1 - topBound, numInVals - 1 - bottomBound)
} else if (numInVals > 64 /* Empirical */) {
// For simulation performance, we should avoid generating
// extremely wide shifters, so we divide and conquer.
// Empirically, this does not impact synthesis QoR.
val mid = numInVals / 2
val msb = in(in.getWidth - 1)
val lsbs = in(in.getWidth - 2, 0)
if (mid < topBound) {
if (mid <= bottomBound) {
Mux(msb,
lowMask(lsbs, topBound - mid, bottomBound - mid),
0.U
)
} else {
Mux(msb,
lowMask(lsbs, topBound - mid, 0) ## ((BigInt(1)<<(mid - bottomBound).toInt) - 1).U,
lowMask(lsbs, mid, bottomBound)
)
}
} else {
~Mux(msb, 0.U, ~lowMask(lsbs, topBound, bottomBound))
}
} else {
val shift = (BigInt(-1)<<numInVals.toInt).S>>in
Reverse(
shift(
(numInVals - 1 - bottomBound).toInt,
(numInVals - topBound).toInt
)
)
}
}
}
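// Illustrative sketch (not part of the original package): the mask from lowMask
// is normally ANDed with an or-reduced significand to form a sticky bit, as the
// C-alignment logic in MulAddRecFNToRaw_preMul does below. `sig` and `shiftDist`
// are placeholder signals; `hiBound`/`loBound` are placeholder elaboration-time
// bound constants.
//   val sticky = (orReduceBy4(sig) & lowMask(shiftDist>>2, hiBound, loBound)).orR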
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object countLeadingZeros
{
def apply(in: UInt): UInt = PriorityEncoder(in.asBools.reverse)
}
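// Worked example (illustrative, not part of the original package): for the
// 4-bit input "b0010".U(4.W), the reversed bit sequence is (0, 0, 1, 0), so the
// PriorityEncoder returns 2 -- the number of leading zeros.
//   countLeadingZeros("b0010".U(4.W))  // 2.U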
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy2
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 1)>>1
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 2 + 1, ix * 2).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 2).orR
reducedVec.asUInt
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy4
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 3)>>2
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 4 + 3, ix * 4).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 4).orR
reducedVec.asUInt
}
}
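// Worked example (illustrative, not part of the original package): for the
// 12-bit input "b000010000001".U(12.W), the low, middle, and high 4-bit groups
// OR-reduce to 1, 1, and 0 respectively, so the result is "b011".U (low group
// in the LSB).
//   orReduceBy4("b000010000001".U(12.W))  // "b011".U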
File MulAddRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN_interIo(expWidth: Int, sigWidth: Int) extends Bundle
{
//*** ENCODE SOME OF THESE CASES IN FEWER BITS?:
val isSigNaNAny = Bool()
val isNaNAOrB = Bool()
val isInfA = Bool()
val isZeroA = Bool()
val isInfB = Bool()
val isZeroB = Bool()
val signProd = Bool()
val isNaNC = Bool()
val isInfC = Bool()
val isZeroC = Bool()
val sExpSum = SInt((expWidth + 2).W)
val doSubMags = Bool()
val CIsDominant = Bool()
val CDom_CAlignDist = UInt(log2Ceil(sigWidth + 1).W)
val highAlignedSigC = UInt((sigWidth + 2).W)
val bit0AlignedSigC = UInt(1.W)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_preMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_preMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val mulAddA = Output(UInt(sigWidth.W))
val mulAddB = Output(UInt(sigWidth.W))
val mulAddC = Output(UInt((sigWidth * 2).W))
val toPostMul = Output(new MulAddRecFN_interIo(expWidth, sigWidth))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
//*** POSSIBLE TO REDUCE THIS BY 1 OR 2 BITS? (CURRENTLY 2 BITS BETWEEN
//*** UNSHIFTED C AND PRODUCT):
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawA = rawFloatFromRecFN(expWidth, sigWidth, io.a)
val rawB = rawFloatFromRecFN(expWidth, sigWidth, io.b)
val rawC = rawFloatFromRecFN(expWidth, sigWidth, io.c)
val signProd = rawA.sign ^ rawB.sign ^ io.op(1)
//*** REVIEW THE BIAS FOR 'sExpAlignedProd':
val sExpAlignedProd =
rawA.sExp +& rawB.sExp + (-(BigInt(1)<<expWidth) + sigWidth + 3).S
val doSubMags = signProd ^ rawC.sign ^ io.op(0)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sNatCAlignDist = sExpAlignedProd - rawC.sExp
val posNatCAlignDist = sNatCAlignDist(expWidth + 1, 0)
val isMinCAlign = rawA.isZero || rawB.isZero || (sNatCAlignDist < 0.S)
val CIsDominant =
! rawC.isZero && (isMinCAlign || (posNatCAlignDist <= sigWidth.U))
val CAlignDist =
Mux(isMinCAlign,
0.U,
Mux(posNatCAlignDist < (sigSumWidth - 1).U,
posNatCAlignDist(log2Ceil(sigSumWidth) - 1, 0),
(sigSumWidth - 1).U
)
)
val mainAlignedSigC =
(Mux(doSubMags, ~rawC.sig, rawC.sig) ## Fill(sigSumWidth - sigWidth + 2, doSubMags)).asSInt>>CAlignDist
val reduced4CExtra =
(orReduceBy4(rawC.sig<<((sigSumWidth - sigWidth - 1) & 3)) &
lowMask(
CAlignDist>>2,
//*** NOT NEEDED?:
// (sigSumWidth + 2)>>2,
(sigSumWidth - 1)>>2,
(sigSumWidth - sigWidth - 1)>>2
)
).orR
val alignedSigC =
Cat(mainAlignedSigC>>3,
Mux(doSubMags,
mainAlignedSigC(2, 0).andR && ! reduced4CExtra,
mainAlignedSigC(2, 0).orR || reduced4CExtra
)
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
io.mulAddA := rawA.sig
io.mulAddB := rawB.sig
io.mulAddC := alignedSigC(sigWidth * 2, 1)
io.toPostMul.isSigNaNAny :=
isSigNaNRawFloat(rawA) || isSigNaNRawFloat(rawB) ||
isSigNaNRawFloat(rawC)
io.toPostMul.isNaNAOrB := rawA.isNaN || rawB.isNaN
io.toPostMul.isInfA := rawA.isInf
io.toPostMul.isZeroA := rawA.isZero
io.toPostMul.isInfB := rawB.isInf
io.toPostMul.isZeroB := rawB.isZero
io.toPostMul.signProd := signProd
io.toPostMul.isNaNC := rawC.isNaN
io.toPostMul.isInfC := rawC.isInf
io.toPostMul.isZeroC := rawC.isZero
io.toPostMul.sExpSum :=
Mux(CIsDominant, rawC.sExp, sExpAlignedProd - sigWidth.S)
io.toPostMul.doSubMags := doSubMags
io.toPostMul.CIsDominant := CIsDominant
io.toPostMul.CDom_CAlignDist := CAlignDist(log2Ceil(sigWidth + 1) - 1, 0)
io.toPostMul.highAlignedSigC :=
alignedSigC(sigSumWidth - 1, sigWidth * 2 + 1)
io.toPostMul.bit0AlignedSigC := alignedSigC(0)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_postMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_postMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val fromPreMul = Input(new MulAddRecFN_interIo(expWidth, sigWidth))
val mulAddResult = Input(UInt((sigWidth * 2 + 1).W))
val roundingMode = Input(UInt(3.W))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_min = (io.roundingMode === round_min)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val opSignC = io.fromPreMul.signProd ^ io.fromPreMul.doSubMags
val sigSum =
Cat(Mux(io.mulAddResult(sigWidth * 2),
io.fromPreMul.highAlignedSigC + 1.U,
io.fromPreMul.highAlignedSigC
),
io.mulAddResult(sigWidth * 2 - 1, 0),
io.fromPreMul.bit0AlignedSigC
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val CDom_sign = opSignC
val CDom_sExp = io.fromPreMul.sExpSum - io.fromPreMul.doSubMags.zext
val CDom_absSigSum =
Mux(io.fromPreMul.doSubMags,
~sigSum(sigSumWidth - 1, sigWidth + 1),
0.U(1.W) ##
//*** IF GAP IS REDUCED TO 1 BIT, MUST REDUCE THIS COMPONENT TO 1 BIT TOO:
io.fromPreMul.highAlignedSigC(sigWidth + 1, sigWidth) ##
sigSum(sigSumWidth - 3, sigWidth + 2)
)
val CDom_absSigSumExtra =
Mux(io.fromPreMul.doSubMags,
(~sigSum(sigWidth, 1)).orR,
sigSum(sigWidth + 1, 1).orR
)
val CDom_mainSig =
(CDom_absSigSum<<io.fromPreMul.CDom_CAlignDist)(
sigWidth * 2 + 1, sigWidth - 3)
val CDom_reduced4SigExtra =
(orReduceBy4(CDom_absSigSum(sigWidth - 1, 0)<<(~sigWidth & 3)) &
lowMask(io.fromPreMul.CDom_CAlignDist>>2, 0, sigWidth>>2)).orR
val CDom_sig =
Cat(CDom_mainSig>>3,
CDom_mainSig(2, 0).orR || CDom_reduced4SigExtra ||
CDom_absSigSumExtra
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notCDom_signSigSum = sigSum(sigWidth * 2 + 3)
val notCDom_absSigSum =
Mux(notCDom_signSigSum,
~sigSum(sigWidth * 2 + 2, 0),
sigSum(sigWidth * 2 + 2, 0) + io.fromPreMul.doSubMags
)
val notCDom_reduced2AbsSigSum = orReduceBy2(notCDom_absSigSum)
val notCDom_normDistReduced2 = countLeadingZeros(notCDom_reduced2AbsSigSum)
val notCDom_nearNormDist = notCDom_normDistReduced2<<1
val notCDom_sExp = io.fromPreMul.sExpSum - notCDom_nearNormDist.asUInt.zext
val notCDom_mainSig =
(notCDom_absSigSum<<notCDom_nearNormDist)(
sigWidth * 2 + 3, sigWidth - 1)
val notCDom_reduced4SigExtra =
(orReduceBy2(
notCDom_reduced2AbsSigSum(sigWidth>>1, 0)<<((sigWidth>>1) & 1)) &
lowMask(notCDom_normDistReduced2>>1, 0, (sigWidth + 2)>>2)
).orR
val notCDom_sig =
Cat(notCDom_mainSig>>3,
notCDom_mainSig(2, 0).orR || notCDom_reduced4SigExtra
)
val notCDom_completeCancellation =
(notCDom_sig(sigWidth + 2, sigWidth + 1) === 0.U)
val notCDom_sign =
Mux(notCDom_completeCancellation,
roundingMode_min,
io.fromPreMul.signProd ^ notCDom_signSigSum
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notNaN_isInfProd = io.fromPreMul.isInfA || io.fromPreMul.isInfB
val notNaN_isInfOut = notNaN_isInfProd || io.fromPreMul.isInfC
val notNaN_addZeros =
(io.fromPreMul.isZeroA || io.fromPreMul.isZeroB) &&
io.fromPreMul.isZeroC
io.invalidExc :=
io.fromPreMul.isSigNaNAny ||
(io.fromPreMul.isInfA && io.fromPreMul.isZeroB) ||
(io.fromPreMul.isZeroA && io.fromPreMul.isInfB) ||
(! io.fromPreMul.isNaNAOrB &&
(io.fromPreMul.isInfA || io.fromPreMul.isInfB) &&
io.fromPreMul.isInfC &&
io.fromPreMul.doSubMags)
io.rawOut.isNaN := io.fromPreMul.isNaNAOrB || io.fromPreMul.isNaNC
io.rawOut.isInf := notNaN_isInfOut
//*** IMPROVE?:
io.rawOut.isZero :=
notNaN_addZeros ||
(! io.fromPreMul.CIsDominant && notCDom_completeCancellation)
io.rawOut.sign :=
(notNaN_isInfProd && io.fromPreMul.signProd) ||
(io.fromPreMul.isInfC && opSignC) ||
(notNaN_addZeros && ! roundingMode_min &&
io.fromPreMul.signProd && opSignC) ||
(notNaN_addZeros && roundingMode_min &&
(io.fromPreMul.signProd || opSignC)) ||
(! notNaN_isInfOut && ! notNaN_addZeros &&
Mux(io.fromPreMul.CIsDominant, CDom_sign, notCDom_sign))
io.rawOut.sExp := Mux(io.fromPreMul.CIsDominant, CDom_sExp, notCDom_sExp)
io.rawOut.sig := Mux(io.fromPreMul.CIsDominant, CDom_sig, notCDom_sig)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulAddRecFNToRaw_preMul =
Module(new MulAddRecFNToRaw_preMul(expWidth, sigWidth))
val mulAddRecFNToRaw_postMul =
Module(new MulAddRecFNToRaw_postMul(expWidth, sigWidth))
mulAddRecFNToRaw_preMul.io.op := io.op
mulAddRecFNToRaw_preMul.io.a := io.a
mulAddRecFNToRaw_preMul.io.b := io.b
mulAddRecFNToRaw_preMul.io.c := io.c
val mulAddResult =
(mulAddRecFNToRaw_preMul.io.mulAddA *
mulAddRecFNToRaw_preMul.io.mulAddB) +&
mulAddRecFNToRaw_preMul.io.mulAddC
mulAddRecFNToRaw_postMul.io.fromPreMul :=
mulAddRecFNToRaw_preMul.io.toPostMul
mulAddRecFNToRaw_postMul.io.mulAddResult := mulAddResult
mulAddRecFNToRaw_postMul.io.roundingMode := io.roundingMode
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN =
Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
roundRawFNToRecFN.io.invalidExc := mulAddRecFNToRaw_postMul.io.invalidExc
roundRawFNToRecFN.io.infiniteExc := false.B
roundRawFNToRecFN.io.in := mulAddRecFNToRaw_postMul.io.rawOut
roundRawFNToRecFN.io.roundingMode := io.roundingMode
roundRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
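// Illustrative usage sketch (an assumption, not part of the original file):
// instantiating the single-precision unit to compute a * b + c on operands that
// are already in the recoded (recFN) format; recA, recB, and recC are
// placeholder 33-bit signals.
//   val fma = Module(new MulAddRecFN(8, 24))
//   fma.io.op := 0.U                                  // 2'b00 => a * b + c
//   fma.io.a := recA
//   fma.io.b := recB
//   fma.io.c := recC
//   fma.io.roundingMode := consts.round_near_even
//   fma.io.detectTininess := consts.tininess_afterRounding
//   val result = fma.io.out                           // recoded result
//   val flags = fma.io.exceptionFlags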
File rawFloatFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
/*----------------------------------------------------------------------------
| In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be
| set.
*----------------------------------------------------------------------------*/
object rawFloatFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat =
{
val exp = in(expWidth + sigWidth - 1, sigWidth - 1)
val isZero = exp(expWidth, expWidth - 2) === 0.U
val isSpecial = exp(expWidth, expWidth - 1) === 3.U
val out = Wire(new RawFloat(expWidth, sigWidth))
out.isNaN := isSpecial && exp(expWidth - 2)
out.isInf := isSpecial && ! exp(expWidth - 2)
out.isZero := isZero
out.sign := in(expWidth + sigWidth)
out.sExp := exp.zext
out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0)
out
}
}
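// Illustrative sketch (an assumption, not part of the original file): decoding a
// 33-bit recoded single-precision operand into its raw fields, exactly as
// MulAddRecFNToRaw_preMul does for its a, b, and c inputs; `recIn` is a
// placeholder signal.
//   val raw = rawFloatFromRecFN(8, 24, recIn)
//   // raw.isNaN / raw.isInf / raw.isZero classify the operand, raw.sExp holds
//   // the zero-extended 9-bit exponent field, and raw.sig is the 23-bit
//   // fraction with the hidden bit made explicit.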
File common.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 The Regents of
the University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
object consts {
/*------------------------------------------------------------------------
| For rounding to integer values, rounding mode 'odd' rounds to minimum
| magnitude instead, same as 'minMag'.
*------------------------------------------------------------------------*/
def round_near_even = "b000".U(3.W)
def round_minMag = "b001".U(3.W)
def round_min = "b010".U(3.W)
def round_max = "b011".U(3.W)
def round_near_maxMag = "b100".U(3.W)
def round_odd = "b110".U(3.W)
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
def tininess_beforeRounding = 0.U
def tininess_afterRounding = 1.U
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
def flRoundOpt_sigMSBitAlwaysZero = 1
def flRoundOpt_subnormsAlwaysExact = 2
def flRoundOpt_neverUnderflows = 4
def flRoundOpt_neverOverflows = 8
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
def divSqrtOpt_twoBitsPerCycle = 16
}
class RawFloat(val expWidth: Int, val sigWidth: Int) extends Bundle
{
val isNaN: Bool = Bool() // overrides all other fields
val isInf: Bool = Bool() // overrides 'isZero', 'sExp', and 'sig'
val isZero: Bool = Bool() // overrides 'sExp' and 'sig'
val sign: Bool = Bool()
val sExp: SInt = SInt((expWidth + 2).W)
val sig: UInt = UInt((sigWidth + 1).W) // 2 m.s. bits cannot both be 0
}
//*** CHANGE THIS INTO A '.isSigNaN' METHOD OF THE 'RawFloat' CLASS:
object isSigNaNRawFloat
{
def apply(in: RawFloat): Bool = in.isNaN && !in.sig(in.sigWidth - 2)
}
module MulAddRecFNToRaw_preMul_e8_s24_13( // @[MulAddRecFN.scala:71:7]
input [32:0] io_a, // @[MulAddRecFN.scala:74:16]
input [32:0] io_c, // @[MulAddRecFN.scala:74:16]
output [23:0] io_mulAddA, // @[MulAddRecFN.scala:74:16]
output [47:0] io_mulAddC, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_isSigNaNAny, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_isNaNAOrB, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_isInfA, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_isZeroA, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_signProd, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_isNaNC, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_isInfC, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_isZeroC, // @[MulAddRecFN.scala:74:16]
output [9:0] io_toPostMul_sExpSum, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_doSubMags, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_CIsDominant, // @[MulAddRecFN.scala:74:16]
output [4:0] io_toPostMul_CDom_CAlignDist, // @[MulAddRecFN.scala:74:16]
output [25:0] io_toPostMul_highAlignedSigC, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_bit0AlignedSigC // @[MulAddRecFN.scala:74:16]
);
wire rawA_sign; // @[rawFloatFromRecFN.scala:55:23]
wire rawA_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire [32:0] io_a_0 = io_a; // @[MulAddRecFN.scala:71:7]
wire [32:0] io_c_0 = io_c; // @[MulAddRecFN.scala:71:7]
wire [8:0] rawB_exp = 9'h100; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _rawB_isZero_T = 3'h4; // @[rawFloatFromRecFN.scala:52:28]
wire [1:0] _rawB_isSpecial_T = 2'h2; // @[rawFloatFromRecFN.scala:53:28]
wire [9:0] rawB_sExp = 10'h100; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire [9:0] _rawB_out_sExp_T = 10'h100; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire [1:0] _rawB_out_sig_T_1 = 2'h1; // @[rawFloatFromRecFN.scala:61:32]
wire [22:0] _rawB_out_sig_T_2 = 23'h0; // @[rawFloatFromRecFN.scala:61:49]
wire [24:0] rawB_sig = 25'h800000; // @[rawFloatFromRecFN.scala:55:23, :61:44]
wire [24:0] _rawB_out_sig_T_3 = 25'h800000; // @[rawFloatFromRecFN.scala:55:23, :61:44]
wire _rawB_out_isInf_T_1 = 1'h1; // @[rawFloatFromRecFN.scala:57:36, :61:35]
wire _rawB_out_sig_T = 1'h1; // @[rawFloatFromRecFN.scala:57:36, :61:35]
wire _io_toPostMul_isSigNaNAny_T_4 = 1'h1; // @[rawFloatFromRecFN.scala:57:36, :61:35]
wire io_toPostMul_isInfB = 1'h0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_isZeroB = 1'h0; // @[MulAddRecFN.scala:71:7]
wire rawB_isZero = 1'h0; // @[rawFloatFromRecFN.scala:52:53]
wire rawB_isSpecial = 1'h0; // @[rawFloatFromRecFN.scala:53:53]
wire rawB_isNaN = 1'h0; // @[rawFloatFromRecFN.scala:55:23]
wire rawB_isInf = 1'h0; // @[rawFloatFromRecFN.scala:55:23]
wire rawB_isZero_0 = 1'h0; // @[rawFloatFromRecFN.scala:55:23]
wire rawB_sign = 1'h0; // @[rawFloatFromRecFN.scala:55:23]
wire _rawB_out_isNaN_T = 1'h0; // @[rawFloatFromRecFN.scala:56:41]
wire _rawB_out_isNaN_T_1 = 1'h0; // @[rawFloatFromRecFN.scala:56:33]
wire _rawB_out_isInf_T = 1'h0; // @[rawFloatFromRecFN.scala:57:41]
wire _rawB_out_isInf_T_2 = 1'h0; // @[rawFloatFromRecFN.scala:57:33]
wire _rawB_out_sign_T = 1'h0; // @[rawFloatFromRecFN.scala:59:25]
wire _signProd_T_1 = 1'h0; // @[MulAddRecFN.scala:97:49]
wire _doSubMags_T_1 = 1'h0; // @[MulAddRecFN.scala:102:49]
wire _io_toPostMul_isSigNaNAny_T_3 = 1'h0; // @[common.scala:82:56]
wire _io_toPostMul_isSigNaNAny_T_5 = 1'h0; // @[common.scala:82:46]
wire [23:0] io_mulAddB = 24'h800000; // @[MulAddRecFN.scala:71:7, :74:16, :142:16]
wire [32:0] io_b = 33'h80000000; // @[MulAddRecFN.scala:71:7, :74:16]
wire [1:0] io_op = 2'h0; // @[MulAddRecFN.scala:71:7, :74:16]
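  // In this specialization the op and b operands are constant-folded: io_op is
  // tied to 2'h0 and io_b to 33'h80000000 (exponent field 9'h100, hidden-bit
  // significand 25'h800000), which appears to be the recoded constant 1.0 used
  // for add-type operations, so io_mulAddB is the fixed value 24'h800000.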
wire [47:0] _io_mulAddC_T; // @[MulAddRecFN.scala:143:30]
wire _io_toPostMul_isSigNaNAny_T_10; // @[MulAddRecFN.scala:146:58]
wire _io_toPostMul_isNaNAOrB_T; // @[MulAddRecFN.scala:148:42]
wire rawA_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire rawA_isZero; // @[rawFloatFromRecFN.scala:55:23]
wire signProd; // @[MulAddRecFN.scala:97:42]
wire rawC_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire rawC_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire rawC_isZero; // @[rawFloatFromRecFN.scala:55:23]
wire doSubMags; // @[MulAddRecFN.scala:102:42]
wire CIsDominant; // @[MulAddRecFN.scala:110:23]
wire [4:0] _io_toPostMul_CDom_CAlignDist_T; // @[MulAddRecFN.scala:161:47]
wire [25:0] _io_toPostMul_highAlignedSigC_T; // @[MulAddRecFN.scala:163:20]
wire _io_toPostMul_bit0AlignedSigC_T; // @[MulAddRecFN.scala:164:48]
wire io_toPostMul_isSigNaNAny_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_isNaNAOrB_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_isInfA_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_isZeroA_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_signProd_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_isNaNC_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_isInfC_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_isZeroC_0; // @[MulAddRecFN.scala:71:7]
wire [9:0] io_toPostMul_sExpSum_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_doSubMags_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_CIsDominant_0; // @[MulAddRecFN.scala:71:7]
wire [4:0] io_toPostMul_CDom_CAlignDist_0; // @[MulAddRecFN.scala:71:7]
wire [25:0] io_toPostMul_highAlignedSigC_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_bit0AlignedSigC_0; // @[MulAddRecFN.scala:71:7]
wire [23:0] io_mulAddA_0; // @[MulAddRecFN.scala:71:7]
wire [47:0] io_mulAddC_0; // @[MulAddRecFN.scala:71:7]
wire [8:0] rawA_exp = io_a_0[31:23]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _rawA_isZero_T = rawA_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire rawA_isZero_0 = _rawA_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
assign rawA_isZero = rawA_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _rawA_isSpecial_T = rawA_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire rawA_isSpecial = &_rawA_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _rawA_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _rawA_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
assign _io_toPostMul_isNaNAOrB_T = rawA_isNaN; // @[rawFloatFromRecFN.scala:55:23]
assign io_toPostMul_isInfA_0 = rawA_isInf; // @[rawFloatFromRecFN.scala:55:23]
assign io_toPostMul_isZeroA_0 = rawA_isZero; // @[rawFloatFromRecFN.scala:55:23]
wire _rawA_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire _isMinCAlign_T = rawA_isZero; // @[rawFloatFromRecFN.scala:55:23]
wire [9:0] _rawA_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire _signProd_T = rawA_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] _rawA_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire [9:0] rawA_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] rawA_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _rawA_out_isNaN_T = rawA_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _rawA_out_isInf_T = rawA_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _rawA_out_isNaN_T_1 = rawA_isSpecial & _rawA_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign rawA_isNaN = _rawA_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _rawA_out_isInf_T_1 = ~_rawA_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _rawA_out_isInf_T_2 = rawA_isSpecial & _rawA_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign rawA_isInf = _rawA_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _rawA_out_sign_T = io_a_0[32]; // @[rawFloatFromRecFN.scala:59:25]
assign rawA_sign = _rawA_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _rawA_out_sExp_T = {1'h0, rawA_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign rawA_sExp = _rawA_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _rawA_out_sig_T = ~rawA_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _rawA_out_sig_T_1 = {1'h0, _rawA_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [22:0] _rawA_out_sig_T_2 = io_a_0[22:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _rawA_out_sig_T_3 = {_rawA_out_sig_T_1, _rawA_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign rawA_sig = _rawA_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
wire [8:0] rawC_exp = io_c_0[31:23]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _rawC_isZero_T = rawC_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire rawC_isZero_0 = _rawC_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
assign rawC_isZero = rawC_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _rawC_isSpecial_T = rawC_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire rawC_isSpecial = &_rawC_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _rawC_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
assign io_toPostMul_isNaNC_0 = rawC_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire _rawC_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
assign io_toPostMul_isInfC_0 = rawC_isInf; // @[rawFloatFromRecFN.scala:55:23]
assign io_toPostMul_isZeroC_0 = rawC_isZero; // @[rawFloatFromRecFN.scala:55:23]
wire _rawC_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [9:0] _rawC_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [24:0] _rawC_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire rawC_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [9:0] rawC_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] rawC_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _rawC_out_isNaN_T = rawC_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _rawC_out_isInf_T = rawC_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _rawC_out_isNaN_T_1 = rawC_isSpecial & _rawC_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign rawC_isNaN = _rawC_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _rawC_out_isInf_T_1 = ~_rawC_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _rawC_out_isInf_T_2 = rawC_isSpecial & _rawC_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign rawC_isInf = _rawC_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _rawC_out_sign_T = io_c_0[32]; // @[rawFloatFromRecFN.scala:59:25]
assign rawC_sign = _rawC_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _rawC_out_sExp_T = {1'h0, rawC_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign rawC_sExp = _rawC_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _rawC_out_sig_T = ~rawC_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _rawC_out_sig_T_1 = {1'h0, _rawC_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [22:0] _rawC_out_sig_T_2 = io_c_0[22:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _rawC_out_sig_T_3 = {_rawC_out_sig_T_1, _rawC_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign rawC_sig = _rawC_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
assign signProd = _signProd_T; // @[MulAddRecFN.scala:97:{30,42}]
assign io_toPostMul_signProd_0 = signProd; // @[MulAddRecFN.scala:71:7, :97:42]
wire [10:0] _sExpAlignedProd_T = {rawA_sExp[9], rawA_sExp} + 11'h100; // @[rawFloatFromRecFN.scala:55:23]
wire [11:0] _sExpAlignedProd_T_1 = {_sExpAlignedProd_T[10], _sExpAlignedProd_T} - 12'hE5; // @[MulAddRecFN.scala:100:{19,32}]
wire [10:0] _sExpAlignedProd_T_2 = _sExpAlignedProd_T_1[10:0]; // @[MulAddRecFN.scala:100:32]
wire [10:0] sExpAlignedProd = _sExpAlignedProd_T_2; // @[MulAddRecFN.scala:100:32]
wire _doSubMags_T = signProd ^ rawC_sign; // @[rawFloatFromRecFN.scala:55:23]
assign doSubMags = _doSubMags_T; // @[MulAddRecFN.scala:102:{30,42}]
assign io_toPostMul_doSubMags_0 = doSubMags; // @[MulAddRecFN.scala:71:7, :102:42]
wire [11:0] _GEN = {sExpAlignedProd[10], sExpAlignedProd}; // @[MulAddRecFN.scala:100:32, :106:42]
wire [11:0] _sNatCAlignDist_T = _GEN - {{2{rawC_sExp[9]}}, rawC_sExp}; // @[rawFloatFromRecFN.scala:55:23]
wire [10:0] _sNatCAlignDist_T_1 = _sNatCAlignDist_T[10:0]; // @[MulAddRecFN.scala:106:42]
wire [10:0] sNatCAlignDist = _sNatCAlignDist_T_1; // @[MulAddRecFN.scala:106:42]
wire [9:0] posNatCAlignDist = sNatCAlignDist[9:0]; // @[MulAddRecFN.scala:106:42, :107:42]
wire _isMinCAlign_T_1 = $signed(sNatCAlignDist) < 11'sh0; // @[MulAddRecFN.scala:106:42, :108:69]
wire isMinCAlign = _isMinCAlign_T | _isMinCAlign_T_1; // @[MulAddRecFN.scala:108:{35,50,69}]
wire _CIsDominant_T = ~rawC_isZero; // @[rawFloatFromRecFN.scala:55:23]
wire _CIsDominant_T_1 = posNatCAlignDist < 10'h19; // @[MulAddRecFN.scala:107:42, :110:60]
wire _CIsDominant_T_2 = isMinCAlign | _CIsDominant_T_1; // @[MulAddRecFN.scala:108:50, :110:{39,60}]
assign CIsDominant = _CIsDominant_T & _CIsDominant_T_2; // @[MulAddRecFN.scala:110:{9,23,39}]
assign io_toPostMul_CIsDominant_0 = CIsDominant; // @[MulAddRecFN.scala:71:7, :110:23]
wire _CAlignDist_T = posNatCAlignDist < 10'h4A; // @[MulAddRecFN.scala:107:42, :114:34]
wire [6:0] _CAlignDist_T_1 = posNatCAlignDist[6:0]; // @[MulAddRecFN.scala:107:42, :115:33]
wire [6:0] _CAlignDist_T_2 = _CAlignDist_T ? _CAlignDist_T_1 : 7'h4A; // @[MulAddRecFN.scala:114:{16,34}, :115:33]
wire [6:0] CAlignDist = isMinCAlign ? 7'h0 : _CAlignDist_T_2; // @[MulAddRecFN.scala:108:50, :112:12, :114:16]
wire [24:0] _mainAlignedSigC_T = ~rawC_sig; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] _mainAlignedSigC_T_1 = doSubMags ? _mainAlignedSigC_T : rawC_sig; // @[rawFloatFromRecFN.scala:55:23]
wire [52:0] _mainAlignedSigC_T_2 = {53{doSubMags}}; // @[MulAddRecFN.scala:102:42, :120:53]
wire [77:0] _mainAlignedSigC_T_3 = {_mainAlignedSigC_T_1, _mainAlignedSigC_T_2}; // @[MulAddRecFN.scala:120:{13,46,53}]
wire [77:0] _mainAlignedSigC_T_4 = _mainAlignedSigC_T_3; // @[MulAddRecFN.scala:120:{46,94}]
wire [77:0] mainAlignedSigC = $signed($signed(_mainAlignedSigC_T_4) >>> CAlignDist); // @[MulAddRecFN.scala:112:12, :120:{94,100}]
wire [26:0] _reduced4CExtra_T = {rawC_sig, 2'h0}; // @[rawFloatFromRecFN.scala:55:23]
wire _reduced4CExtra_reducedVec_0_T_1; // @[primitives.scala:120:54]
wire _reduced4CExtra_reducedVec_1_T_1; // @[primitives.scala:120:54]
wire _reduced4CExtra_reducedVec_2_T_1; // @[primitives.scala:120:54]
wire _reduced4CExtra_reducedVec_3_T_1; // @[primitives.scala:120:54]
wire _reduced4CExtra_reducedVec_4_T_1; // @[primitives.scala:120:54]
wire _reduced4CExtra_reducedVec_5_T_1; // @[primitives.scala:120:54]
wire _reduced4CExtra_reducedVec_6_T_1; // @[primitives.scala:123:57]
wire reduced4CExtra_reducedVec_0; // @[primitives.scala:118:30]
wire reduced4CExtra_reducedVec_1; // @[primitives.scala:118:30]
wire reduced4CExtra_reducedVec_2; // @[primitives.scala:118:30]
wire reduced4CExtra_reducedVec_3; // @[primitives.scala:118:30]
wire reduced4CExtra_reducedVec_4; // @[primitives.scala:118:30]
wire reduced4CExtra_reducedVec_5; // @[primitives.scala:118:30]
wire reduced4CExtra_reducedVec_6; // @[primitives.scala:118:30]
wire [3:0] _reduced4CExtra_reducedVec_0_T = _reduced4CExtra_T[3:0]; // @[primitives.scala:120:33]
assign _reduced4CExtra_reducedVec_0_T_1 = |_reduced4CExtra_reducedVec_0_T; // @[primitives.scala:120:{33,54}]
assign reduced4CExtra_reducedVec_0 = _reduced4CExtra_reducedVec_0_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _reduced4CExtra_reducedVec_1_T = _reduced4CExtra_T[7:4]; // @[primitives.scala:120:33]
assign _reduced4CExtra_reducedVec_1_T_1 = |_reduced4CExtra_reducedVec_1_T; // @[primitives.scala:120:{33,54}]
assign reduced4CExtra_reducedVec_1 = _reduced4CExtra_reducedVec_1_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _reduced4CExtra_reducedVec_2_T = _reduced4CExtra_T[11:8]; // @[primitives.scala:120:33]
assign _reduced4CExtra_reducedVec_2_T_1 = |_reduced4CExtra_reducedVec_2_T; // @[primitives.scala:120:{33,54}]
assign reduced4CExtra_reducedVec_2 = _reduced4CExtra_reducedVec_2_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _reduced4CExtra_reducedVec_3_T = _reduced4CExtra_T[15:12]; // @[primitives.scala:120:33]
assign _reduced4CExtra_reducedVec_3_T_1 = |_reduced4CExtra_reducedVec_3_T; // @[primitives.scala:120:{33,54}]
assign reduced4CExtra_reducedVec_3 = _reduced4CExtra_reducedVec_3_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _reduced4CExtra_reducedVec_4_T = _reduced4CExtra_T[19:16]; // @[primitives.scala:120:33]
assign _reduced4CExtra_reducedVec_4_T_1 = |_reduced4CExtra_reducedVec_4_T; // @[primitives.scala:120:{33,54}]
assign reduced4CExtra_reducedVec_4 = _reduced4CExtra_reducedVec_4_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _reduced4CExtra_reducedVec_5_T = _reduced4CExtra_T[23:20]; // @[primitives.scala:120:33]
assign _reduced4CExtra_reducedVec_5_T_1 = |_reduced4CExtra_reducedVec_5_T; // @[primitives.scala:120:{33,54}]
assign reduced4CExtra_reducedVec_5 = _reduced4CExtra_reducedVec_5_T_1; // @[primitives.scala:118:30, :120:54]
wire [2:0] _reduced4CExtra_reducedVec_6_T = _reduced4CExtra_T[26:24]; // @[primitives.scala:123:15]
assign _reduced4CExtra_reducedVec_6_T_1 = |_reduced4CExtra_reducedVec_6_T; // @[primitives.scala:123:{15,57}]
assign reduced4CExtra_reducedVec_6 = _reduced4CExtra_reducedVec_6_T_1; // @[primitives.scala:118:30, :123:57]
wire [1:0] reduced4CExtra_lo_hi = {reduced4CExtra_reducedVec_2, reduced4CExtra_reducedVec_1}; // @[primitives.scala:118:30, :124:20]
wire [2:0] reduced4CExtra_lo = {reduced4CExtra_lo_hi, reduced4CExtra_reducedVec_0}; // @[primitives.scala:118:30, :124:20]
wire [1:0] reduced4CExtra_hi_lo = {reduced4CExtra_reducedVec_4, reduced4CExtra_reducedVec_3}; // @[primitives.scala:118:30, :124:20]
wire [1:0] reduced4CExtra_hi_hi = {reduced4CExtra_reducedVec_6, reduced4CExtra_reducedVec_5}; // @[primitives.scala:118:30, :124:20]
wire [3:0] reduced4CExtra_hi = {reduced4CExtra_hi_hi, reduced4CExtra_hi_lo}; // @[primitives.scala:124:20]
wire [6:0] _reduced4CExtra_T_1 = {reduced4CExtra_hi, reduced4CExtra_lo}; // @[primitives.scala:124:20]
wire [4:0] _reduced4CExtra_T_2 = CAlignDist[6:2]; // @[MulAddRecFN.scala:112:12, :124:28]
wire [32:0] reduced4CExtra_shift = $signed(33'sh100000000 >>> _reduced4CExtra_T_2); // @[primitives.scala:76:56]
wire [5:0] _reduced4CExtra_T_3 = reduced4CExtra_shift[19:14]; // @[primitives.scala:76:56, :78:22]
wire [3:0] _reduced4CExtra_T_4 = _reduced4CExtra_T_3[3:0]; // @[primitives.scala:77:20, :78:22]
wire [1:0] _reduced4CExtra_T_5 = _reduced4CExtra_T_4[1:0]; // @[primitives.scala:77:20]
wire _reduced4CExtra_T_6 = _reduced4CExtra_T_5[0]; // @[primitives.scala:77:20]
wire _reduced4CExtra_T_7 = _reduced4CExtra_T_5[1]; // @[primitives.scala:77:20]
wire [1:0] _reduced4CExtra_T_8 = {_reduced4CExtra_T_6, _reduced4CExtra_T_7}; // @[primitives.scala:77:20]
wire [1:0] _reduced4CExtra_T_9 = _reduced4CExtra_T_4[3:2]; // @[primitives.scala:77:20]
wire _reduced4CExtra_T_10 = _reduced4CExtra_T_9[0]; // @[primitives.scala:77:20]
wire _reduced4CExtra_T_11 = _reduced4CExtra_T_9[1]; // @[primitives.scala:77:20]
wire [1:0] _reduced4CExtra_T_12 = {_reduced4CExtra_T_10, _reduced4CExtra_T_11}; // @[primitives.scala:77:20]
wire [3:0] _reduced4CExtra_T_13 = {_reduced4CExtra_T_8, _reduced4CExtra_T_12}; // @[primitives.scala:77:20]
wire [1:0] _reduced4CExtra_T_14 = _reduced4CExtra_T_3[5:4]; // @[primitives.scala:77:20, :78:22]
wire _reduced4CExtra_T_15 = _reduced4CExtra_T_14[0]; // @[primitives.scala:77:20]
wire _reduced4CExtra_T_16 = _reduced4CExtra_T_14[1]; // @[primitives.scala:77:20]
wire [1:0] _reduced4CExtra_T_17 = {_reduced4CExtra_T_15, _reduced4CExtra_T_16}; // @[primitives.scala:77:20]
wire [5:0] _reduced4CExtra_T_18 = {_reduced4CExtra_T_13, _reduced4CExtra_T_17}; // @[primitives.scala:77:20]
wire [6:0] _reduced4CExtra_T_19 = {1'h0, _reduced4CExtra_T_1[5:0] & _reduced4CExtra_T_18}; // @[primitives.scala:77:20, :124:20]
wire reduced4CExtra = |_reduced4CExtra_T_19; // @[MulAddRecFN.scala:122:68, :130:11]
wire [74:0] _alignedSigC_T = mainAlignedSigC[77:3]; // @[MulAddRecFN.scala:120:100, :132:28]
wire [74:0] alignedSigC_hi = _alignedSigC_T; // @[MulAddRecFN.scala:132:{12,28}]
wire [2:0] _alignedSigC_T_1 = mainAlignedSigC[2:0]; // @[MulAddRecFN.scala:120:100, :134:32]
wire [2:0] _alignedSigC_T_5 = mainAlignedSigC[2:0]; // @[MulAddRecFN.scala:120:100, :134:32, :135:32]
wire _alignedSigC_T_2 = &_alignedSigC_T_1; // @[MulAddRecFN.scala:134:{32,39}]
wire _alignedSigC_T_3 = ~reduced4CExtra; // @[MulAddRecFN.scala:130:11, :134:47]
wire _alignedSigC_T_4 = _alignedSigC_T_2 & _alignedSigC_T_3; // @[MulAddRecFN.scala:134:{39,44,47}]
wire _alignedSigC_T_6 = |_alignedSigC_T_5; // @[MulAddRecFN.scala:135:{32,39}]
wire _alignedSigC_T_7 = _alignedSigC_T_6 | reduced4CExtra; // @[MulAddRecFN.scala:130:11, :135:{39,44}]
wire _alignedSigC_T_8 = doSubMags ? _alignedSigC_T_4 : _alignedSigC_T_7; // @[MulAddRecFN.scala:102:42, :133:16, :134:44, :135:44]
wire [75:0] alignedSigC = {alignedSigC_hi, _alignedSigC_T_8}; // @[MulAddRecFN.scala:132:12, :133:16]
assign io_mulAddA_0 = rawA_sig[23:0]; // @[rawFloatFromRecFN.scala:55:23]
assign _io_mulAddC_T = alignedSigC[48:1]; // @[MulAddRecFN.scala:132:12, :143:30]
assign io_mulAddC_0 = _io_mulAddC_T; // @[MulAddRecFN.scala:71:7, :143:30]
wire _io_toPostMul_isSigNaNAny_T = rawA_sig[22]; // @[rawFloatFromRecFN.scala:55:23]
wire _io_toPostMul_isSigNaNAny_T_1 = ~_io_toPostMul_isSigNaNAny_T; // @[common.scala:82:{49,56}]
wire _io_toPostMul_isSigNaNAny_T_2 = rawA_isNaN & _io_toPostMul_isSigNaNAny_T_1; // @[rawFloatFromRecFN.scala:55:23]
wire _io_toPostMul_isSigNaNAny_T_6 = _io_toPostMul_isSigNaNAny_T_2; // @[common.scala:82:46]
wire _io_toPostMul_isSigNaNAny_T_7 = rawC_sig[22]; // @[rawFloatFromRecFN.scala:55:23]
wire _io_toPostMul_isSigNaNAny_T_8 = ~_io_toPostMul_isSigNaNAny_T_7; // @[common.scala:82:{49,56}]
wire _io_toPostMul_isSigNaNAny_T_9 = rawC_isNaN & _io_toPostMul_isSigNaNAny_T_8; // @[rawFloatFromRecFN.scala:55:23]
assign _io_toPostMul_isSigNaNAny_T_10 = _io_toPostMul_isSigNaNAny_T_6 | _io_toPostMul_isSigNaNAny_T_9; // @[common.scala:82:46]
assign io_toPostMul_isSigNaNAny_0 = _io_toPostMul_isSigNaNAny_T_10; // @[MulAddRecFN.scala:71:7, :146:58]
assign io_toPostMul_isNaNAOrB_0 = _io_toPostMul_isNaNAOrB_T; // @[MulAddRecFN.scala:71:7, :148:42]
wire [11:0] _io_toPostMul_sExpSum_T = _GEN - 12'h18; // @[MulAddRecFN.scala:106:42, :158:53]
wire [10:0] _io_toPostMul_sExpSum_T_1 = _io_toPostMul_sExpSum_T[10:0]; // @[MulAddRecFN.scala:158:53]
wire [10:0] _io_toPostMul_sExpSum_T_2 = _io_toPostMul_sExpSum_T_1; // @[MulAddRecFN.scala:158:53]
wire [10:0] _io_toPostMul_sExpSum_T_3 = CIsDominant ? {rawC_sExp[9], rawC_sExp} : _io_toPostMul_sExpSum_T_2; // @[rawFloatFromRecFN.scala:55:23]
assign io_toPostMul_sExpSum_0 = _io_toPostMul_sExpSum_T_3[9:0]; // @[MulAddRecFN.scala:71:7, :157:28, :158:12]
assign _io_toPostMul_CDom_CAlignDist_T = CAlignDist[4:0]; // @[MulAddRecFN.scala:112:12, :161:47]
assign io_toPostMul_CDom_CAlignDist_0 = _io_toPostMul_CDom_CAlignDist_T; // @[MulAddRecFN.scala:71:7, :161:47]
assign _io_toPostMul_highAlignedSigC_T = alignedSigC[74:49]; // @[MulAddRecFN.scala:132:12, :163:20]
assign io_toPostMul_highAlignedSigC_0 = _io_toPostMul_highAlignedSigC_T; // @[MulAddRecFN.scala:71:7, :163:20]
assign _io_toPostMul_bit0AlignedSigC_T = alignedSigC[0]; // @[MulAddRecFN.scala:132:12, :164:48]
assign io_toPostMul_bit0AlignedSigC_0 = _io_toPostMul_bit0AlignedSigC_T; // @[MulAddRecFN.scala:71:7, :164:48]
assign io_mulAddA = io_mulAddA_0; // @[MulAddRecFN.scala:71:7]
assign io_mulAddC = io_mulAddC_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_isSigNaNAny = io_toPostMul_isSigNaNAny_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_isNaNAOrB = io_toPostMul_isNaNAOrB_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_isInfA = io_toPostMul_isInfA_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_isZeroA = io_toPostMul_isZeroA_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_signProd = io_toPostMul_signProd_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_isNaNC = io_toPostMul_isNaNC_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_isInfC = io_toPostMul_isInfC_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_isZeroC = io_toPostMul_isZeroC_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_sExpSum = io_toPostMul_sExpSum_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_doSubMags = io_toPostMul_doSubMags_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_CIsDominant = io_toPostMul_CIsDominant_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_CDom_CAlignDist = io_toPostMul_CDom_CAlignDist_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_highAlignedSigC = io_toPostMul_highAlignedSigC_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_bit0AlignedSigC = io_toPostMul_bit0AlignedSigC_0; // @[MulAddRecFN.scala:71:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module
* and its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
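// Editorial note (not in the upstream file): each source that appears exactly twice
// here is a pair of complementary dangles (one flipped, one not), e.g. a node's
// outward edge meeting its partner's inward edge; the pair is wired together and
// marked done, while unmatched dangles fall through to `forward` below and become auto IO.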
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
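// --- Editorial sketch (not part of the upstream file) ---
// Illustrates the childClock/childReset recommendation above with a hypothetical
// LazyRawModuleImp that takes its clock domain from explicit IO pins. The class
// name `ExampleClockedImp` and its `io` bundle are invented for illustration.
class ExampleClockedImp(wrapper: LazyModule) extends LazyRawModuleImp(wrapper) {
  val io = IO(new Bundle {
    val clk = Input(Clock())
    val rst = Input(AsyncReset())
  })
  // Drive the default clock/reset seen by all lazy children (including anonymous
  // modules such as Monitors), as recommended by the comment above.
  childClock := io.clk
  childReset := io.rst
}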
File SBA.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.devices.debug.systembusaccess
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.amba.{AMBAProt, AMBAProtField}
import freechips.rocketchip.devices.debug.{DebugModuleKey, RWNotify, SBCSFields, WNotifyVal}
import freechips.rocketchip.diplomacy.TransferSizes
import freechips.rocketchip.regmapper.{RegField, RegFieldDesc, RegFieldGroup, RegFieldWrType}
import freechips.rocketchip.tilelink.{TLClientNode, TLMasterParameters, TLMasterPortParameters}
import freechips.rocketchip.util.property
object SystemBusAccessState extends scala.Enumeration {
type SystemBusAccessState = Value
val Idle, SBReadRequest, SBWriteRequest, SBReadResponse, SBWriteResponse = Value
}
object SBErrorCode extends scala.Enumeration {
type SBErrorCode = Value
val NoError = Value(0)
val Timeout = Value(1)
val BadAddr = Value(2)
val AlgnError = Value(3)
val BadAccess = Value(4)
val OtherError = Value(7)
}
object SystemBusAccessModule
{
def apply(sb2tl: SBToTL, dmactive: Bool, dmAuthenticated: Bool)(implicit p: Parameters):
(Seq[RegField], Seq[Seq[RegField]], Seq[Seq[RegField]]) =
{
import SBErrorCode._
val cfg = p(DebugModuleKey).get
val anyAddressWrEn = WireInit(false.B).suggestName("anyAddressWrEn")
val anyDataRdEn = WireInit(false.B).suggestName("anyDataRdEn")
val anyDataWrEn = WireInit(false.B).suggestName("anyDataWrEn")
// --- SBCS Status Register ---
val SBCSFieldsReg = Reg(new SBCSFields()).suggestName("SBCSFieldsReg")
val SBCSFieldsRegReset = WireInit(0.U.asTypeOf(new SBCSFields()))
SBCSFieldsRegReset.sbversion := 1.U(1.W) // This code implements a version of the spec after January 1, 2018
SBCSFieldsRegReset.sbbusy := (sb2tl.module.io.sbStateOut =/= SystemBusAccessState.Idle.id.U)
SBCSFieldsRegReset.sbaccess := 2.U
SBCSFieldsRegReset.sbasize := sb2tl.module.edge.bundle.addressBits.U
SBCSFieldsRegReset.sbaccess128 := (cfg.maxSupportedSBAccess == 128).B
SBCSFieldsRegReset.sbaccess64 := (cfg.maxSupportedSBAccess >= 64).B
SBCSFieldsRegReset.sbaccess32 := (cfg.maxSupportedSBAccess >= 32).B
SBCSFieldsRegReset.sbaccess16 := (cfg.maxSupportedSBAccess >= 16).B
SBCSFieldsRegReset.sbaccess8 := (cfg.maxSupportedSBAccess >= 8).B
val SBCSRdData = WireInit(0.U.asTypeOf(new SBCSFields())).suggestName("SBCSRdData")
val SBCSWrDataVal = WireInit(0.U(32.W))
val SBCSWrData = WireInit(SBCSWrDataVal.asTypeOf(new SBCSFields()))
val sberrorWrEn = WireInit(false.B)
val sbreadondataWrEn = WireInit(false.B)
val sbautoincrementWrEn= WireInit(false.B)
val sbaccessWrEn = WireInit(false.B)
val sbreadonaddrWrEn = WireInit(false.B)
val sbbusyerrorWrEn = WireInit(false.B)
val sbcsfields = RegFieldGroup("sbcs", Some("system bus access control and status"), Seq(
RegField.r(1, SBCSRdData.sbaccess8, RegFieldDesc("sbaccess8", "8-bit accesses supported", reset=Some(if (cfg.maxSupportedSBAccess >= 8) 1 else 0))),
RegField.r(1, SBCSRdData.sbaccess16, RegFieldDesc("sbaccess16", "16-bit accesses supported", reset=Some(if (cfg.maxSupportedSBAccess >= 16) 1 else 0))),
RegField.r(1, SBCSRdData.sbaccess32, RegFieldDesc("sbaccess32", "32-bit accesses supported", reset=Some(if (cfg.maxSupportedSBAccess >= 32) 1 else 0))),
RegField.r(1, SBCSRdData.sbaccess64, RegFieldDesc("sbaccess64", "64-bit accesses supported", reset=Some(if (cfg.maxSupportedSBAccess >= 64) 1 else 0))),
RegField.r(1, SBCSRdData.sbaccess128, RegFieldDesc("sbaccess128", "128-bit accesses supported", reset=Some(if (cfg.maxSupportedSBAccess == 128) 1 else 0))),
RegField.r(7, SBCSRdData.sbasize, RegFieldDesc("sbasize", "bits in address", reset=Some(sb2tl.module.edge.bundle.addressBits))),
WNotifyVal(3, SBCSRdData.sberror, SBCSWrData.sberror, sberrorWrEn,
RegFieldDesc("sberror", "system bus error", reset=Some(0), wrType=Some(RegFieldWrType.ONE_TO_CLEAR))),
WNotifyVal(1, SBCSRdData.sbreadondata, SBCSWrData.sbreadondata, sbreadondataWrEn,
RegFieldDesc("sbreadondata", "system bus read on data", reset=Some(0))),
WNotifyVal(1, SBCSRdData.sbautoincrement, SBCSWrData.sbautoincrement, sbautoincrementWrEn,
RegFieldDesc("sbautoincrement", "system bus auto-increment address", reset=Some(0))),
WNotifyVal(3, SBCSRdData.sbaccess, SBCSWrData.sbaccess, sbaccessWrEn,
RegFieldDesc("sbaccess", "system bus access size", reset=Some(2))),
WNotifyVal(1, SBCSRdData.sbreadonaddr, SBCSWrData.sbreadonaddr, sbreadonaddrWrEn,
RegFieldDesc("sbreadonaddr", "system bus read on data", reset=Some(0))),
RegField.r(1, SBCSRdData.sbbusy, RegFieldDesc("sbbusy", "system bus access is busy", reset=Some(0))),
WNotifyVal(1, SBCSRdData.sbbusyerror, SBCSWrData.sbbusyerror, sbbusyerrorWrEn,
RegFieldDesc("sbbusyerror", "system bus busy error", reset=Some(0), wrType=Some(RegFieldWrType.ONE_TO_CLEAR))),
RegField(6),
RegField.r(3, SBCSRdData.sbversion, RegFieldDesc("sbversion", "system bus access version", reset=Some(1))),
))
// --- System Bus Address Registers ---
// ADDR0 Register is required
// Instantiate ADDR1-3 registers as needed depending on system bus address width
val hasSBAddr1 = (sb2tl.module.edge.bundle.addressBits >= 33)
val hasSBAddr2 = (sb2tl.module.edge.bundle.addressBits >= 65)
val hasSBAddr3 = (sb2tl.module.edge.bundle.addressBits >= 97)
val hasAddr = Seq(true, hasSBAddr1, hasSBAddr2, hasSBAddr3)
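// Editorial note (worked example, not in the upstream file): with a 32-bit system
// bus address only SBADDRESS0 is implemented; at 33 bits and above SBADDRESS1 is
// added, at 65 bits SBADDRESS2, and at 97 bits SBADDRESS3.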
val SBADDRESSFieldsReg = Reg(Vec(4, UInt(32.W)))
SBADDRESSFieldsReg.zipWithIndex.foreach { case(a,i) => a.suggestName("SBADDRESS"+i+"FieldsReg")}
val SBADDRESSWrData = WireInit(VecInit(Seq.fill(4) {0.U(32.W)} ))
val SBADDRESSRdEn = WireInit(VecInit(Seq.fill(4) {false.B} ))
val SBADDRESSWrEn = WireInit(VecInit(Seq.fill(4) {false.B} ))
val autoIncrementedAddr = WireInit(0.U(128.W))
autoIncrementedAddr := Cat(SBADDRESSFieldsReg.reverse) + (1.U << SBCSFieldsReg.sbaccess)
autoIncrementedAddr.suggestName("autoIncrementedAddr")
val sbaddrfields: Seq[Seq[RegField]] = SBADDRESSFieldsReg.zipWithIndex.map { case(a,i) =>
if(hasAddr(i)) {
when (~dmactive || ~dmAuthenticated) {
a := 0.U(32.W)
}.otherwise {
a := Mux(SBADDRESSWrEn(i) && !SBCSRdData.sberror && !SBCSFieldsReg.sbbusy && !SBCSFieldsReg.sbbusyerror, SBADDRESSWrData(i),
Mux((sb2tl.module.io.rdDone || sb2tl.module.io.wrDone) && SBCSFieldsReg.sbautoincrement, autoIncrementedAddr(32*i+31,32*i), a))
}
RegFieldGroup("dmi_sbaddr"+i, Some("SBA Address Register"), Seq(RWNotify(32, a, SBADDRESSWrData(i), SBADDRESSRdEn(i), SBADDRESSWrEn(i),
Some(RegFieldDesc("dmi_sbaddr"+i, "SBA address register", reset=Some(0), volatile=true)))))
} else {
a := DontCare
Seq.empty[RegField]
}
}
sb2tl.module.io.addrIn := Mux(SBADDRESSWrEn(0),
Cat(Cat(SBADDRESSFieldsReg.drop(1).reverse), SBADDRESSWrData(0)),
Cat(SBADDRESSFieldsReg.reverse))
anyAddressWrEn := SBADDRESSWrEn.reduce(_ || _)
// --- System Bus Data Registers ---
// DATA0 Register is required
// DATA1-3 Registers may not be needed depending on implementation
val hasSBData1 = (cfg.maxSupportedSBAccess > 32)
val hasSBData2And3 = (cfg.maxSupportedSBAccess == 128)
val hasData = Seq(true, hasSBData1, hasSBData2And3, hasSBData2And3)
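// Editorial note (worked example, not in the upstream file): maxSupportedSBAccess = 32
// implements only SBDATA0, 64 adds SBDATA1, and 128 adds SBDATA2 and SBDATA3.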
val SBDATAFieldsReg = Reg(Vec(4, Vec(4, UInt(8.W))))
SBDATAFieldsReg.zipWithIndex.foreach { case(d,i) => d.zipWithIndex.foreach { case(d,j) => d.suggestName("SBDATA"+i+"BYTE"+j) }}
val SBDATARdData = WireInit(VecInit(Seq.fill(4) {0.U(32.W)} ))
SBDATARdData.zipWithIndex.foreach { case(d,i) => d.suggestName("SBDATARdData"+i) }
val SBDATAWrData = WireInit(VecInit(Seq.fill(4) {0.U(32.W)} ))
SBDATAWrData.zipWithIndex.foreach { case(d,i) => d.suggestName("SBDATAWrData"+i) }
val SBDATARdEn = WireInit(VecInit(Seq.fill(4) {false.B} ))
val SBDATAWrEn = WireInit(VecInit(Seq.fill(4) {false.B} ))
SBDATAWrEn.zipWithIndex.foreach { case(d,i) => d.suggestName("SBDATAWrEn"+i) }
val sbdatafields: Seq[Seq[RegField]] = SBDATAFieldsReg.zipWithIndex.map { case(d,i) =>
if(hasData(i)) {
// For data registers, load enable per-byte
for (j <- 0 to 3) {
when (~dmactive || ~dmAuthenticated) {
d(j) := 0.U(8.W)
}.otherwise {
d(j) := Mux(SBDATAWrEn(i) && !SBCSFieldsReg.sbbusy && !SBCSFieldsReg.sbbusyerror && !SBCSRdData.sberror, SBDATAWrData(i)(8*j+7,8*j),
Mux(sb2tl.module.io.rdLoad(4*i+j), sb2tl.module.io.dataOut, d(j)))
}
}
SBDATARdData(i) := Cat(d.reverse)
RegFieldGroup("dmi_sbdata"+i, Some("SBA Data Register"), Seq(RWNotify(32, SBDATARdData(i), SBDATAWrData(i), SBDATARdEn(i), SBDATAWrEn(i),
Some(RegFieldDesc("dmi_sbdata"+i, "SBA data register", reset=Some(0), volatile=true)))))
} else {
for (j <- 0 to 3) { d(j) := DontCare }
Seq.empty[RegField]
}
}
sb2tl.module.io.dataIn := Mux(sb2tl.module.io.wrEn,Cat(SBDATAWrData.reverse),Cat(SBDATAFieldsReg.flatten.reverse))
anyDataRdEn := SBDATARdEn.reduce(_ || _)
anyDataWrEn := SBDATAWrEn.reduce(_ || _)
val tryWrEn = SBDATAWrEn(0)
val tryRdEn = (SBADDRESSWrEn(0) && SBCSFieldsReg.sbreadonaddr) || (SBDATARdEn(0) && SBCSFieldsReg.sbreadondata)
val sbAccessError = (SBCSFieldsReg.sbaccess === 0.U) && (SBCSFieldsReg.sbaccess8 =/= 1.U) ||
(SBCSFieldsReg.sbaccess === 1.U) && (SBCSFieldsReg.sbaccess16 =/= 1.U) ||
(SBCSFieldsReg.sbaccess === 2.U) && (SBCSFieldsReg.sbaccess32 =/= 1.U) ||
(SBCSFieldsReg.sbaccess === 3.U) && (SBCSFieldsReg.sbaccess64 =/= 1.U) ||
(SBCSFieldsReg.sbaccess === 4.U) && (SBCSFieldsReg.sbaccess128 =/= 1.U) || (SBCSFieldsReg.sbaccess > 4.U)
val compareAddr = Wire(UInt(32.W)) // Need to use the written or latched address to detect the error case, depending on how the transaction was initiated
compareAddr := Mux(SBADDRESSWrEn(0),SBADDRESSWrData(0),SBADDRESSFieldsReg(0))
val sbAlignmentError = (SBCSFieldsReg.sbaccess === 1.U) && (compareAddr(0) =/= 0.U) ||
(SBCSFieldsReg.sbaccess === 2.U) && (compareAddr(1,0) =/= 0.U) ||
(SBCSFieldsReg.sbaccess === 3.U) && (compareAddr(2,0) =/= 0.U) ||
(SBCSFieldsReg.sbaccess === 4.U) && (compareAddr(3,0) =/= 0.U)
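// Editorial note (worked example, not in the upstream file): e.g. sbaccess = 3
// (64-bit access) with any of the low three address bits set raises an alignment
// error, and sbaccess = 3 on a system reporting sbaccess64 = 0 raises an access error.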
sbAccessError.suggestName("sbAccessError")
sbAlignmentError.suggestName("sbAlignmentError")
sb2tl.module.io.wrEn := dmAuthenticated && tryWrEn && !SBCSFieldsReg.sbbusy && !SBCSFieldsReg.sbbusyerror && !SBCSRdData.sberror && !sbAccessError && !sbAlignmentError
sb2tl.module.io.rdEn := dmAuthenticated && tryRdEn && !SBCSFieldsReg.sbbusy && !SBCSFieldsReg.sbbusyerror && !SBCSRdData.sberror && !sbAccessError && !sbAlignmentError
sb2tl.module.io.sizeIn := SBCSFieldsReg.sbaccess
val sbBusy = (sb2tl.module.io.sbStateOut =/= SystemBusAccessState.Idle.id.U)
when (~dmactive || ~dmAuthenticated) {
SBCSFieldsReg := SBCSFieldsRegReset
}.otherwise {
SBCSFieldsReg.sbbusyerror := Mux(sbbusyerrorWrEn && SBCSWrData.sbbusyerror, false.B, // W1C
Mux(anyAddressWrEn && sbBusy, true.B, // Set if a write to SBADDRESS occurs while busy
Mux((anyDataRdEn || anyDataWrEn) && sbBusy, true.B, SBCSFieldsReg.sbbusyerror))) // Set if any access to SBDATA occurs while busy
SBCSFieldsReg.sbreadonaddr := Mux(sbreadonaddrWrEn, SBCSWrData.sbreadonaddr , SBCSFieldsReg.sbreadonaddr)
SBCSFieldsReg.sbautoincrement := Mux(sbautoincrementWrEn, SBCSWrData.sbautoincrement, SBCSFieldsReg.sbautoincrement)
SBCSFieldsReg.sbreadondata := Mux(sbreadondataWrEn, SBCSWrData.sbreadondata , SBCSFieldsReg.sbreadondata)
SBCSFieldsReg.sbaccess := Mux(sbaccessWrEn, SBCSWrData.sbaccess, SBCSFieldsReg.sbaccess)
SBCSFieldsReg.sbversion := 1.U(1.W) // This code implements a version of the spec after January 1, 2018
}
// sbErrorReg has a per-bit load enable since each bit can be individually cleared by writing a 1 to it
val sbErrorReg = Reg(Vec(4, UInt(1.W)))
when(~dmactive || ~dmAuthenticated) {
for (i <- 0 until 3)
sbErrorReg(i) := 0.U
}.otherwise {
for (i <- 0 until 3)
sbErrorReg(i) := Mux(sberrorWrEn && SBCSWrData.sberror(i) === 1.U, NoError.id.U.extract(i), // W1C
Mux((sb2tl.module.io.wrEn && !sb2tl.module.io.wrLegal) || (sb2tl.module.io.rdEn && !sb2tl.module.io.rdLegal), BadAddr.id.U.extract(i), // Bad address accessed
Mux((tryWrEn || tryRdEn) && sbAlignmentError, AlgnError.id.U.extract(i), // Address alignment error
Mux((tryWrEn || tryRdEn) && sbAccessError, BadAccess.id.U.extract(i), // Access size error
Mux((sb2tl.module.io.rdDone || sb2tl.module.io.wrDone) && sb2tl.module.io.respError, OtherError.id.U.extract(i), sbErrorReg(i)))))) // Response error from TL
}
SBCSRdData := SBCSFieldsReg
SBCSRdData.sbasize := sb2tl.module.edge.bundle.addressBits.U
SBCSRdData.sbaccess128 := (cfg.maxSupportedSBAccess == 128).B
SBCSRdData.sbaccess64 := (cfg.maxSupportedSBAccess >= 64).B
SBCSRdData.sbaccess32 := (cfg.maxSupportedSBAccess >= 32).B
SBCSRdData.sbaccess16 := (cfg.maxSupportedSBAccess >= 16).B
SBCSRdData.sbaccess8 := (cfg.maxSupportedSBAccess >= 8).B
SBCSRdData.sbbusy := sbBusy
SBCSRdData.sberror := sbErrorReg.asUInt
when (~dmAuthenticated) { // Read value must be 0 if not authenticated
SBCSRdData := 0.U.asTypeOf(new SBCSFields())
}
property.cover(SBCSFieldsReg.sbbusyerror, "SBCS Cover", "sbbusyerror set")
property.cover(SBCSFieldsReg.sbbusy === 3.U, "SBCS Cover", "sbbusyerror alignment error")
property.cover((sb2tl.module.io.wrEn || sb2tl.module.io.rdEn) && SBCSFieldsReg.sbaccess === 0.U && !sbAccessError && !sbAlignmentError, "SBCS Cover", "8-bit access")
property.cover((sb2tl.module.io.wrEn || sb2tl.module.io.rdEn) && SBCSFieldsReg.sbaccess === 1.U && !sbAccessError && !sbAlignmentError, "SBCS Cover", "16-bit access")
property.cover((sb2tl.module.io.wrEn || sb2tl.module.io.rdEn) && SBCSFieldsReg.sbaccess === 2.U && !sbAccessError && !sbAlignmentError, "SBCS Cover", "32-bit access")
property.cover((sb2tl.module.io.wrEn || sb2tl.module.io.rdEn) && SBCSFieldsReg.sbaccess === 3.U && !sbAccessError && !sbAlignmentError, "SBCS Cover", "64-bit access")
property.cover((sb2tl.module.io.wrEn || sb2tl.module.io.rdEn) && SBCSFieldsReg.sbaccess === 4.U && !sbAccessError && !sbAlignmentError, "SBCS Cover", "128-bit access")
property.cover(SBCSFieldsReg.sbautoincrement && SBCSFieldsReg.sbbusy, "SBCS Cover", "Access with autoincrement set")
property.cover(!SBCSFieldsReg.sbautoincrement && SBCSFieldsReg.sbbusy, "SBCS Cover", "Access without autoincrement set")
property.cover((sb2tl.module.io.wrEn || sb2tl.module.io.rdEn) && SBCSFieldsReg.sbaccess > 4.U, "SBCS Cover", "Invalid sbaccess value")
(sbcsfields, sbaddrfields, sbdatafields)
}
}
class SBToTL(implicit p: Parameters) extends LazyModule {
val cfg = p(DebugModuleKey).get
val node = TLClientNode(Seq(TLMasterPortParameters.v1(
clients = Seq(TLMasterParameters.v1("debug")),
requestFields = Seq(AMBAProtField()))))
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
val io = IO(new Bundle {
val rdEn = Input(Bool())
val wrEn = Input(Bool())
val addrIn = Input(UInt(128.W)) // TODO: Parameterize these widths
val dataIn = Input(UInt(128.W))
val sizeIn = Input(UInt(3.W))
val rdLegal = Output(Bool())
val wrLegal = Output(Bool())
val rdDone = Output(Bool())
val wrDone = Output(Bool())
val respError = Output(Bool())
val dataOut = Output(UInt(8.W))
val rdLoad = Output(Vec(cfg.maxSupportedSBAccess/8, Bool()))
val sbStateOut = Output(UInt(log2Ceil(SystemBusAccessState.maxId).W))
})
val rf_reset = IO(Input(Reset()))
import SystemBusAccessState._
val (tl, edge) = node.out(0)
val sbState = RegInit(0.U)
// --- Drive payloads on bus to TileLink ---
val d = Queue(tl.d, 2) // Add a small buffer since response could arrive on same cycle as request
d.ready := (sbState === SBReadResponse.id.U) || (sbState === SBWriteResponse.id.U)
val muxedData = WireInit(0.U(8.W))
val requestValid = tl.a.valid
val requestReady = tl.a.ready
val responseValid = d.valid
val responseReady = d.ready
val counter = RegInit(0.U((log2Ceil(cfg.maxSupportedSBAccess/8)+1).W))
val vecData = Wire(Vec(cfg.maxSupportedSBAccess/8, UInt(8.W)))
vecData.zipWithIndex.map { case (vd, i) => vd := io.dataIn(8*i+7,8*i) }
muxedData := vecData(counter(log2Ceil(vecData.size)-1,0))
// Need an additional check to determine if address is safe for Get/Put
val rdLegal_addr = edge.manager.supportsGetSafe(io.addrIn, io.sizeIn, Some(TransferSizes(1,cfg.maxSupportedSBAccess/8)))
val wrLegal_addr = edge.manager.supportsPutFullSafe(io.addrIn, io.sizeIn, Some(TransferSizes(1,cfg.maxSupportedSBAccess/8)))
val (_, gbits) = edge.Get(0.U, io.addrIn, io.sizeIn)
val (_, pfbits) = edge.Put(0.U, io.addrIn, io.sizeIn, muxedData)
io.rdLegal := rdLegal_addr
io.wrLegal := wrLegal_addr
io.sbStateOut := sbState
when(sbState === SBReadRequest.id.U) { tl.a.bits := gbits }
.otherwise { tl.a.bits := pfbits }
tl.a.bits.user.lift(AMBAProt).foreach { x =>
x.bufferable := false.B
x.modifiable := false.B
x.readalloc := false.B
x.writealloc := false.B
x.privileged := true.B
x.secure := true.B
x.fetch := false.B
}
val respError = d.bits.denied || d.bits.corrupt
io.respError := respError
val wrTxValid = sbState === SBWriteRequest.id.U && requestValid && requestReady
val rdTxValid = sbState === SBReadResponse.id.U && responseValid && responseReady
val txLast = counter === ((1.U << io.sizeIn) - 1.U)
counter := Mux((wrTxValid || rdTxValid) && txLast, 0.U,
Mux((wrTxValid || rdTxValid) , counter+1.U, counter))
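// Editorial note (not in the upstream file): the SBA datapath here moves one byte per
// beat, so a transfer of size sbaccess takes 2^sbaccess beats; e.g. sbaccess = 2
// (32-bit) needs 4 beats and txLast asserts when counter reaches 3.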
for (i <- 0 until (cfg.maxSupportedSBAccess/8)) {
io.rdLoad(i) := rdTxValid && (counter === i.U)
}
// --- State Machine to interface with TileLink ---
when (sbState === Idle.id.U){
sbState := Mux(io.rdEn && io.rdLegal, SBReadRequest.id.U,
Mux(io.wrEn && io.wrLegal, SBWriteRequest.id.U, sbState))
}.elsewhen (sbState === SBReadRequest.id.U){
sbState := Mux(requestValid && requestReady, SBReadResponse.id.U, sbState)
}.elsewhen (sbState === SBWriteRequest.id.U){
sbState := Mux(wrTxValid && txLast, SBWriteResponse.id.U, sbState)
}.elsewhen (sbState === SBReadResponse.id.U){
sbState := Mux(rdTxValid && txLast, Idle.id.U, sbState)
}.elsewhen (sbState === SBWriteResponse.id.U){
sbState := Mux(responseValid && responseReady, Idle.id.U, sbState)
}
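// Editorial note (not in the upstream file): state flow summary --
//   reads:  Idle -> SBReadRequest -> SBReadResponse -> Idle
//   writes: Idle -> SBWriteRequest -> SBWriteResponse -> Idle
// A write stays in SBWriteRequest until its last data beat is accepted on channel A;
// a read stays in SBReadResponse until its last beat is consumed from channel D.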
io.rdDone := rdTxValid && txLast
io.wrDone := (sbState === SBWriteResponse.id.U) && responseValid && responseReady
io.dataOut := d.bits.data
tl.a.valid := (sbState === SBReadRequest.id.U) || (sbState === SBWriteRequest.id.U)
// Tie off unused channels
tl.b.ready := false.B
tl.c.valid := false.B
tl.e.valid := false.B
assert (sbState === Idle.id.U ||
sbState === SBReadRequest.id.U ||
sbState === SBWriteRequest.id.U ||
sbState === SBReadResponse.id.U ||
sbState === SBWriteResponse.id.U, "SBA state machine in undefined state")
property.cover (sbState === Idle.id.U, "SBA State Cover", "SBA Access Idle")
property.cover (sbState === SBReadRequest.id.U, "SBA State Cover", "SBA Access Read Req")
property.cover (sbState === SBWriteRequest.id.U, "SBA State Cover", "SBA Access Write Req")
property.cover (sbState === SBReadResponse.id.U, "SBA State Cover", "SBA Access Read Resp")
property.cover (sbState === SBWriteResponse.id.U, "SBA State Cover", "SBA Access Write Resp")
property.cover (io.rdEn && !io.rdLegal, "SB Legality Cover", "SBA Rd Address Illegal")
property.cover (io.wrEn && !io.wrLegal, "SB Legality Cover", "SBA Wr Address Illegal")
}
}
| module SBToTL( // @[SBA.scala:273:9]
input clock, // @[SBA.scala:273:9]
input reset, // @[SBA.scala:273:9]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input io_rdEn, // @[SBA.scala:274:16]
input io_wrEn, // @[SBA.scala:274:16]
input [127:0] io_addrIn, // @[SBA.scala:274:16]
input [127:0] io_dataIn, // @[SBA.scala:274:16]
input [2:0] io_sizeIn, // @[SBA.scala:274:16]
output io_rdLegal, // @[SBA.scala:274:16]
output io_wrLegal, // @[SBA.scala:274:16]
output io_rdDone, // @[SBA.scala:274:16]
output io_wrDone, // @[SBA.scala:274:16]
output io_respError, // @[SBA.scala:274:16]
output [7:0] io_dataOut, // @[SBA.scala:274:16]
output io_rdLoad_0, // @[SBA.scala:274:16]
output io_rdLoad_1, // @[SBA.scala:274:16]
output io_rdLoad_2, // @[SBA.scala:274:16]
output io_rdLoad_3, // @[SBA.scala:274:16]
output io_rdLoad_4, // @[SBA.scala:274:16]
output io_rdLoad_5, // @[SBA.scala:274:16]
output io_rdLoad_6, // @[SBA.scala:274:16]
output io_rdLoad_7, // @[SBA.scala:274:16]
output [2:0] io_sbStateOut // @[SBA.scala:274:16]
);
wire _d_q_io_deq_valid; // @[Decoupled.scala:362:21]
wire _d_q_io_deq_bits_denied; // @[Decoupled.scala:362:21]
wire _d_q_io_deq_bits_corrupt; // @[Decoupled.scala:362:21]
reg [2:0] sbState; // @[SBA.scala:295:26]
wire _rdTxValid_T = sbState == 3'h3; // @[SBA.scala:295:26, :299:25]
wire _io_wrDone_T = sbState == 3'h4; // @[SBA.scala:295:26, :299:62]
wire d_q_io_deq_ready = _rdTxValid_T | _io_wrDone_T; // @[SBA.scala:299:{25,50,62}]
reg [3:0] counter; // @[SBA.scala:307:26]
wire [7:0][7:0] _GEN = {{io_dataIn[63:56]}, {io_dataIn[55:48]}, {io_dataIn[47:40]}, {io_dataIn[39:32]}, {io_dataIn[31:24]}, {io_dataIn[23:16]}, {io_dataIn[15:8]}, {io_dataIn[7:0]}}; // @[SBA.scala:309:63, :310:15]
wire [115:0] _GEN_0 = {io_addrIn[127:14], ~(io_addrIn[13:12])}; // @[Parameters.scala:137:{31,41,46}]
wire [114:0] _GEN_1 = {io_addrIn[127:21], io_addrIn[20:17] ^ 4'h8, io_addrIn[15:12]}; // @[Parameters.scala:137:{31,41,46}]
wire [111:0] _GEN_2 = {io_addrIn[127:26], io_addrIn[25:16] ^ 10'h200}; // @[Parameters.scala:137:{31,41,46}]
wire [115:0] _GEN_3 = {io_addrIn[127:26], io_addrIn[25:12] ^ 14'h2010}; // @[Parameters.scala:137:{31,41,46}]
wire [111:0] _GEN_4 = {io_addrIn[127:28], io_addrIn[27:16] ^ 12'h800}; // @[Parameters.scala:137:{31,41,46}]
wire [101:0] _GEN_5 = {io_addrIn[127:28], ~(io_addrIn[27:26])}; // @[Parameters.scala:137:{31,41,46}]
wire [115:0] _GEN_6 = {io_addrIn[127:29], io_addrIn[28:12] ^ 17'h10020}; // @[Parameters.scala:137:{31,41,46}]
wire [99:0] _GEN_7 = {io_addrIn[127:32], io_addrIn[31:28] ^ 4'h8}; // @[Parameters.scala:137:{31,41,46}]
wire io_rdLegal_0 = ~(|(io_addrIn[127:13])) | ~(|_GEN_0) | {io_addrIn[127:17], ~(io_addrIn[16])} == 112'h0 | ~(|_GEN_1) | ~(|_GEN_2) | ~(|_GEN_3) | ~(|_GEN_4) | ~(|_GEN_5) | ~(|_GEN_6) | ~(|_GEN_7); // @[Parameters.scala:685:42]
wire io_wrLegal_0 = ~(|(io_addrIn[127:13])) | ~(|_GEN_0) | ~(|_GEN_1) | ~(|_GEN_2) | ~(|_GEN_3) | ~(|_GEN_4) | ~(|_GEN_5) | ~(|_GEN_6) | ~(|_GEN_7); // @[Parameters.scala:685:42]
wire _nodeOut_a_valid_T = sbState == 3'h1; // @[SBA.scala:295:26, :322:18]
wire _nodeOut_a_valid_T_1 = sbState == 3'h2; // @[SBA.scala:295:26, :338:29]
wire rdTxValid = _rdTxValid_T & _d_q_io_deq_valid & d_q_io_deq_ready; // @[Decoupled.scala:362:21]
wire txLast = {4'h0, counter} == (8'h1 << io_sizeIn) - 8'h1; // @[SBA.scala:307:26, :340:{29,39,53}]
wire _GEN_8 = sbState == 3'h0; // @[SBA.scala:295:26, :349:19]
wire nodeOut_a_valid = _nodeOut_a_valid_T | _nodeOut_a_valid_T_1; // @[SBA.scala:322:18, :338:29, :366:52] |
Generate the Verilog code corresponding to the following Chisel files.
File MSHR.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import freechips.rocketchip.tilelink._
import TLPermissions._
import TLMessages._
import MetaData._
import chisel3.PrintableHelper
import chisel3.experimental.dataview._
class ScheduleRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val a = Valid(new SourceARequest(params))
val b = Valid(new SourceBRequest(params))
val c = Valid(new SourceCRequest(params))
val d = Valid(new SourceDRequest(params))
val e = Valid(new SourceERequest(params))
val x = Valid(new SourceXRequest(params))
val dir = Valid(new DirectoryWrite(params))
val reload = Bool() // get next request via allocate (if any)
}
class MSHRStatus(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val way = UInt(params.wayBits.W)
val blockB = Bool()
val nestB = Bool()
val blockC = Bool()
val nestC = Bool()
}
class NestedWriteback(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val b_toN = Bool() // nested Probes may unhit us
val b_toB = Bool() // nested Probes may demote us
val b_clr_dirty = Bool() // nested Probes clear dirty
val c_set_dirty = Bool() // nested Releases MAY set dirty
}
sealed trait CacheState
{
val code = CacheState.index.U
CacheState.index = CacheState.index + 1
}
object CacheState
{
var index = 0
}
case object S_INVALID extends CacheState
case object S_BRANCH extends CacheState
case object S_BRANCH_C extends CacheState
case object S_TIP extends CacheState
case object S_TIP_C extends CacheState
case object S_TIP_CD extends CacheState
case object S_TIP_D extends CacheState
case object S_TRUNK_C extends CacheState
case object S_TRUNK_CD extends CacheState
class MSHR(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val allocate = Flipped(Valid(new AllocateRequest(params))) // refills MSHR for next cycle
val directory = Flipped(Valid(new DirectoryResult(params))) // triggers schedule setup
val status = Valid(new MSHRStatus(params))
val schedule = Decoupled(new ScheduleRequest(params))
val sinkc = Flipped(Valid(new SinkCResponse(params)))
val sinkd = Flipped(Valid(new SinkDResponse(params)))
val sinke = Flipped(Valid(new SinkEResponse(params)))
val nestedwb = Flipped(new NestedWriteback(params))
})
val request_valid = RegInit(false.B)
val request = Reg(new FullRequest(params))
val meta_valid = RegInit(false.B)
val meta = Reg(new DirectoryResult(params))
// Define which states are valid
when (meta_valid) {
when (meta.state === INVALID) {
assert (!meta.clients.orR)
assert (!meta.dirty)
}
when (meta.state === BRANCH) {
assert (!meta.dirty)
}
when (meta.state === TRUNK) {
assert (meta.clients.orR)
assert ((meta.clients & (meta.clients - 1.U)) === 0.U) // at most one
}
when (meta.state === TIP) {
// noop
}
}
// Completed transitions (s_ = scheduled), (w_ = waiting)
val s_rprobe = RegInit(true.B) // B
val w_rprobeackfirst = RegInit(true.B)
val w_rprobeacklast = RegInit(true.B)
val s_release = RegInit(true.B) // CW w_rprobeackfirst
val w_releaseack = RegInit(true.B)
val s_pprobe = RegInit(true.B) // B
val s_acquire = RegInit(true.B) // A s_release, s_pprobe [1]
val s_flush = RegInit(true.B) // X w_releaseack
val w_grantfirst = RegInit(true.B)
val w_grantlast = RegInit(true.B)
val w_grant = RegInit(true.B) // first | last depending on wormhole
val w_pprobeackfirst = RegInit(true.B)
val w_pprobeacklast = RegInit(true.B)
val w_pprobeack = RegInit(true.B) // first | last depending on wormhole
val s_probeack = RegInit(true.B) // C w_pprobeackfirst (mutually exclusive with next two s_*)
val s_grantack = RegInit(true.B) // E w_grantfirst ... CAN require both outE&inD to service outD
val s_execute = RegInit(true.B) // D w_pprobeack, w_grant
val w_grantack = RegInit(true.B)
val s_writeback = RegInit(true.B) // W w_*
// [1]: We cannot issue outer Acquire while holding blockB (=> outA can stall)
// However, inB and outC are higher priority than outB, so s_release and s_pprobe
// may be safely issued while blockB. Thus we must NOT try to schedule the
// potentially stuck s_acquire with either of them (scheduler is all or none).
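// Editorial note (not in the upstream file): the s_* flags reset to true and are
// lowered only for messages the current request still needs to schedule; the w_*
// flags are lowered while the matching responses are outstanding and are raised
// again by the io.sinkc/sinkd/sinke blocks below as those responses arrive.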
// Meta-data that we discover underway
val sink = Reg(UInt(params.outer.bundle.sinkBits.W))
val gotT = Reg(Bool())
val bad_grant = Reg(Bool())
val probes_done = Reg(UInt(params.clientBits.W))
val probes_toN = Reg(UInt(params.clientBits.W))
val probes_noT = Reg(Bool())
// When a nested transaction completes, update our meta data
when (meta_valid && meta.state =/= INVALID &&
io.nestedwb.set === request.set && io.nestedwb.tag === meta.tag) {
when (io.nestedwb.b_clr_dirty) { meta.dirty := false.B }
when (io.nestedwb.c_set_dirty) { meta.dirty := true.B }
when (io.nestedwb.b_toB) { meta.state := BRANCH }
when (io.nestedwb.b_toN) { meta.hit := false.B }
}
// Scheduler status
io.status.valid := request_valid
io.status.bits.set := request.set
io.status.bits.tag := request.tag
io.status.bits.way := meta.way
io.status.bits.blockB := !meta_valid || ((!w_releaseack || !w_rprobeacklast || !w_pprobeacklast) && !w_grantfirst)
io.status.bits.nestB := meta_valid && w_releaseack && w_rprobeacklast && w_pprobeacklast && !w_grantfirst
// The above rules ensure we will block and not nest an outer probe while still doing our
// own inner probes. Thus every probe wakes exactly one MSHR.
io.status.bits.blockC := !meta_valid
io.status.bits.nestC := meta_valid && (!w_rprobeackfirst || !w_pprobeackfirst || !w_grantfirst)
// The w_grantfirst in nestC is necessary to deal with:
// acquire waiting for grant, inner release gets queued, outer probe -> inner probe -> deadlock
// ... this is possible because the release+probe can be for same set, but different tag
// We can only demand: block, nest, or queue
assert (!io.status.bits.nestB || !io.status.bits.blockB)
assert (!io.status.bits.nestC || !io.status.bits.blockC)
// Scheduler requests
val no_wait = w_rprobeacklast && w_releaseack && w_grantlast && w_pprobeacklast && w_grantack
io.schedule.bits.a.valid := !s_acquire && s_release && s_pprobe
io.schedule.bits.b.valid := !s_rprobe || !s_pprobe
io.schedule.bits.c.valid := (!s_release && w_rprobeackfirst) || (!s_probeack && w_pprobeackfirst)
io.schedule.bits.d.valid := !s_execute && w_pprobeack && w_grant
io.schedule.bits.e.valid := !s_grantack && w_grantfirst
io.schedule.bits.x.valid := !s_flush && w_releaseack
io.schedule.bits.dir.valid := (!s_release && w_rprobeackfirst) || (!s_writeback && no_wait)
io.schedule.bits.reload := no_wait
io.schedule.valid := io.schedule.bits.a.valid || io.schedule.bits.b.valid || io.schedule.bits.c.valid ||
io.schedule.bits.d.valid || io.schedule.bits.e.valid || io.schedule.bits.x.valid ||
io.schedule.bits.dir.valid
// Schedule completions
when (io.schedule.ready) {
s_rprobe := true.B
when (w_rprobeackfirst) { s_release := true.B }
s_pprobe := true.B
when (s_release && s_pprobe) { s_acquire := true.B }
when (w_releaseack) { s_flush := true.B }
when (w_pprobeackfirst) { s_probeack := true.B }
when (w_grantfirst) { s_grantack := true.B }
when (w_pprobeack && w_grant) { s_execute := true.B }
when (no_wait) { s_writeback := true.B }
// Await the next operation
when (no_wait) {
request_valid := false.B
meta_valid := false.B
}
}
// Resulting meta-data
val final_meta_writeback = WireInit(meta)
val req_clientBit = params.clientBit(request.source)
val req_needT = needT(request.opcode, request.param)
val req_acquire = request.opcode === AcquireBlock || request.opcode === AcquirePerm
val meta_no_clients = !meta.clients.orR
val req_promoteT = req_acquire && Mux(meta.hit, meta_no_clients && meta.state === TIP, gotT)
when (request.prio(2) && (!params.firstLevel).B) { // always a hit
final_meta_writeback.dirty := meta.dirty || request.opcode(0)
final_meta_writeback.state := Mux(request.param =/= TtoT && meta.state === TRUNK, TIP, meta.state)
final_meta_writeback.clients := meta.clients & ~Mux(isToN(request.param), req_clientBit, 0.U)
final_meta_writeback.hit := true.B // chained requests are hits
} .elsewhen (request.control && params.control.B) { // request.prio(0)
when (meta.hit) {
final_meta_writeback.dirty := false.B
final_meta_writeback.state := INVALID
final_meta_writeback.clients := meta.clients & ~probes_toN
}
final_meta_writeback.hit := false.B
} .otherwise {
final_meta_writeback.dirty := (meta.hit && meta.dirty) || !request.opcode(2)
final_meta_writeback.state := Mux(req_needT,
Mux(req_acquire, TRUNK, TIP),
Mux(!meta.hit, Mux(gotT, Mux(req_acquire, TRUNK, TIP), BRANCH),
MuxLookup(meta.state, 0.U(2.W))(Seq(
INVALID -> BRANCH,
BRANCH -> BRANCH,
TRUNK -> TIP,
TIP -> Mux(meta_no_clients && req_acquire, TRUNK, TIP)))))
final_meta_writeback.clients := Mux(meta.hit, meta.clients & ~probes_toN, 0.U) |
Mux(req_acquire, req_clientBit, 0.U)
final_meta_writeback.tag := request.tag
final_meta_writeback.hit := true.B
}
when (bad_grant) {
when (meta.hit) {
// upgrade failed (B -> T)
assert (!meta_valid || meta.state === BRANCH)
final_meta_writeback.hit := true.B
final_meta_writeback.dirty := false.B
final_meta_writeback.state := BRANCH
final_meta_writeback.clients := meta.clients & ~probes_toN
} .otherwise {
// failed N -> (T or B)
final_meta_writeback.hit := false.B
final_meta_writeback.dirty := false.B
final_meta_writeback.state := INVALID
final_meta_writeback.clients := 0.U
}
}
val invalid = Wire(new DirectoryEntry(params))
invalid.dirty := false.B
invalid.state := INVALID
invalid.clients := 0.U
invalid.tag := 0.U
// Just because a client says BtoT, by the time we process the request it may be N.
// Therefore, we must consult our own meta-data state to confirm it still owns the line.
val honour_BtoT = meta.hit && (meta.clients & req_clientBit).orR
// The client asking us to act is proof they don't have permissions.
val excluded_client = Mux(meta.hit && request.prio(0) && skipProbeN(request.opcode, params.cache.hintsSkipProbe), req_clientBit, 0.U)
io.schedule.bits.a.bits.tag := request.tag
io.schedule.bits.a.bits.set := request.set
io.schedule.bits.a.bits.param := Mux(req_needT, Mux(meta.hit, BtoT, NtoT), NtoB)
io.schedule.bits.a.bits.block := request.size =/= log2Ceil(params.cache.blockBytes).U ||
!(request.opcode === PutFullData || request.opcode === AcquirePerm)
io.schedule.bits.a.bits.source := 0.U
io.schedule.bits.b.bits.param := Mux(!s_rprobe, toN, Mux(request.prio(1), request.param, Mux(req_needT, toN, toB)))
io.schedule.bits.b.bits.tag := Mux(!s_rprobe, meta.tag, request.tag)
io.schedule.bits.b.bits.set := request.set
io.schedule.bits.b.bits.clients := meta.clients & ~excluded_client
io.schedule.bits.c.bits.opcode := Mux(meta.dirty, ReleaseData, Release)
io.schedule.bits.c.bits.param := Mux(meta.state === BRANCH, BtoN, TtoN)
io.schedule.bits.c.bits.source := 0.U
io.schedule.bits.c.bits.tag := meta.tag
io.schedule.bits.c.bits.set := request.set
io.schedule.bits.c.bits.way := meta.way
io.schedule.bits.c.bits.dirty := meta.dirty
io.schedule.bits.d.bits.viewAsSupertype(chiselTypeOf(request)) := request
io.schedule.bits.d.bits.param := Mux(!req_acquire, request.param,
MuxLookup(request.param, request.param)(Seq(
NtoB -> Mux(req_promoteT, NtoT, NtoB),
BtoT -> Mux(honour_BtoT, BtoT, NtoT),
NtoT -> NtoT)))
io.schedule.bits.d.bits.sink := 0.U
io.schedule.bits.d.bits.way := meta.way
io.schedule.bits.d.bits.bad := bad_grant
io.schedule.bits.e.bits.sink := sink
io.schedule.bits.x.bits.fail := false.B
io.schedule.bits.dir.bits.set := request.set
io.schedule.bits.dir.bits.way := meta.way
io.schedule.bits.dir.bits.data := Mux(!s_release, invalid, WireInit(new DirectoryEntry(params), init = final_meta_writeback))
// Coverage of state transitions
def cacheState(entry: DirectoryEntry, hit: Bool) = {
val out = WireDefault(0.U)
val c = entry.clients.orR
val d = entry.dirty
switch (entry.state) {
is (BRANCH) { out := Mux(c, S_BRANCH_C.code, S_BRANCH.code) }
is (TRUNK) { out := Mux(d, S_TRUNK_CD.code, S_TRUNK_C.code) }
is (TIP) { out := Mux(c, Mux(d, S_TIP_CD.code, S_TIP_C.code), Mux(d, S_TIP_D.code, S_TIP.code)) }
is (INVALID) { out := S_INVALID.code }
}
when (!hit) { out := S_INVALID.code }
out
}
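// Editorial note (worked example, not in the upstream file): a hit entry in TIP with
// dirty set and no clients maps to S_TIP_D, the same entry with clients maps to
// S_TIP_CD, and any miss (hit === false) maps to S_INVALID.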
val p = !params.lastLevel // can be probed
val c = !params.firstLevel // can be acquired
val m = params.inner.client.clients.exists(!_.supports.probe) // can be written (or read)
val r = params.outer.manager.managers.exists(!_.alwaysGrantsT) // read-only devices exist
val f = params.control // flush control register exists
val cfg = (p, c, m, r, f)
val b = r || p // can reach branch state (via probe downgrade or read-only device)
// The cache must be used for something or we would not be here
require(c || m)
val evict = cacheState(meta, !meta.hit)
val before = cacheState(meta, meta.hit)
val after = cacheState(final_meta_writeback, true.B)
def eviction(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) {
if (cover) {
params.ccover(evict === from.code, s"MSHR_${from}_EVICT", s"State transition from ${from} to evicted ${cfg}")
} else {
assert(!(evict === from.code), cf"State transition from ${from} to evicted should be impossible ${cfg}")
}
if (cover && f) {
params.ccover(before === from.code, s"MSHR_${from}_FLUSH", s"State transition from ${from} to flushed ${cfg}")
} else {
assert(!(before === from.code), cf"State transition from ${from} to flushed should be impossible ${cfg}")
}
}
def transition(from: CacheState, to: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) {
if (cover) {
params.ccover(before === from.code && after === to.code, s"MSHR_${from}_${to}", s"State transition from ${from} to ${to} ${cfg}")
} else {
assert(!(before === from.code && after === to.code), cf"State transition from ${from} to ${to} should be impossible ${cfg}")
}
}
when ((!s_release && w_rprobeackfirst) && io.schedule.ready) {
eviction(S_BRANCH, b) // MMIO read to read-only device
eviction(S_BRANCH_C, b && c) // you need children to become C
eviction(S_TIP, true) // MMIO read || clean release can lead to this state
eviction(S_TIP_C, c) // needs two clients || client + mmio || downgrading client
eviction(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client
eviction(S_TIP_D, true) // MMIO write || dirty release lead here
eviction(S_TRUNK_C, c) // acquire for write
eviction(S_TRUNK_CD, c) // dirty release then reacquire
}
when ((!s_writeback && no_wait) && io.schedule.ready) {
transition(S_INVALID, S_BRANCH, b && m) // only MMIO can bring us to BRANCH state
transition(S_INVALID, S_BRANCH_C, b && c) // C state is only possible if there are inner caches
transition(S_INVALID, S_TIP, m) // MMIO read
transition(S_INVALID, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_INVALID, S_TIP_CD, false) // acquire does not cause dirty immediately
transition(S_INVALID, S_TIP_D, m) // MMIO write
transition(S_INVALID, S_TRUNK_C, c) // acquire
transition(S_INVALID, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH, S_INVALID, b && p) // probe can do this (flushes run as evictions)
transition(S_BRANCH, S_BRANCH_C, b && c) // acquire
transition(S_BRANCH, S_TIP, b && m) // prefetch write
transition(S_BRANCH, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_BRANCH, S_TIP_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH, S_TIP_D, b && m) // MMIO write
transition(S_BRANCH, S_TRUNK_C, b && c) // acquire
transition(S_BRANCH, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH_C, S_INVALID, b && c && p)
transition(S_BRANCH_C, S_BRANCH, b && c) // clean release (optional)
transition(S_BRANCH_C, S_TIP, b && c && m) // prefetch write
transition(S_BRANCH_C, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_BRANCH_C, S_TIP_D, b && c && m) // MMIO write
transition(S_BRANCH_C, S_TIP_CD, false) // going dirty means we must shoot down clients
transition(S_BRANCH_C, S_TRUNK_C, b && c) // acquire
transition(S_BRANCH_C, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_TIP, S_INVALID, p)
transition(S_TIP, S_BRANCH, p) // losing TIP only possible via probe
transition(S_TIP, S_BRANCH_C, false) // we would go S_TRUNK_C instead
transition(S_TIP, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP, S_TIP_D, m) // direct dirty only via MMIO write
transition(S_TIP, S_TIP_CD, false) // acquire does not make us dirty immediately
transition(S_TIP, S_TRUNK_C, c) // acquire
transition(S_TIP, S_TRUNK_CD, false) // acquire does not make us dirty immediately
transition(S_TIP_C, S_INVALID, c && p)
transition(S_TIP_C, S_BRANCH, c && p) // losing TIP only possible via probe
transition(S_TIP_C, S_BRANCH_C, c && p) // losing TIP only possible via probe
transition(S_TIP_C, S_TIP, c) // probed while MMIO read || clean release (optional)
transition(S_TIP_C, S_TIP_D, c && m) // direct dirty only via MMIO write
transition(S_TIP_C, S_TIP_CD, false) // going dirty means we must shoot down clients
transition(S_TIP_C, S_TRUNK_C, c) // acquire
transition(S_TIP_C, S_TRUNK_CD, false) // acquire does not make us immediately dirty
transition(S_TIP_D, S_INVALID, p)
transition(S_TIP_D, S_BRANCH, p) // losing D is only possible via probe
transition(S_TIP_D, S_BRANCH_C, p && c) // probed while acquire shared
transition(S_TIP_D, S_TIP, p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TIP_D, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP_D, S_TIP_CD, false) // we would go S_TRUNK_CD instead
transition(S_TIP_D, S_TRUNK_C, p && c) // probed while acquired
transition(S_TIP_D, S_TRUNK_CD, c) // acquire
transition(S_TIP_CD, S_INVALID, c && p)
transition(S_TIP_CD, S_BRANCH, c && p) // losing D is only possible via probe
transition(S_TIP_CD, S_BRANCH_C, c && p) // losing D is only possible via probe
transition(S_TIP_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TIP_CD, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP_CD, S_TIP_D, c) // MMIO write || clean release (optional)
transition(S_TIP_CD, S_TRUNK_C, c && p) // probed while acquire
transition(S_TIP_CD, S_TRUNK_CD, c) // acquire
transition(S_TRUNK_C, S_INVALID, c && p)
transition(S_TRUNK_C, S_BRANCH, c && p) // losing TIP only possible via probe
transition(S_TRUNK_C, S_BRANCH_C, c && p) // losing TIP only possible via probe
transition(S_TRUNK_C, S_TIP, c) // MMIO read || clean release (optional)
transition(S_TRUNK_C, S_TIP_C, c) // bounce shared
transition(S_TRUNK_C, S_TIP_D, c) // dirty release
transition(S_TRUNK_C, S_TIP_CD, c) // dirty bounce shared
transition(S_TRUNK_C, S_TRUNK_CD, c) // dirty bounce
transition(S_TRUNK_CD, S_INVALID, c && p)
transition(S_TRUNK_CD, S_BRANCH, c && p) // losing D only possible via probe
transition(S_TRUNK_CD, S_BRANCH_C, c && p) // losing D only possible via probe
transition(S_TRUNK_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TRUNK_CD, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TRUNK_CD, S_TIP_D, c) // dirty release
transition(S_TRUNK_CD, S_TIP_CD, c) // bounce shared
transition(S_TRUNK_CD, S_TRUNK_C, c && p) // probed while acquire
}
// Handle response messages
val probe_bit = params.clientBit(io.sinkc.bits.source)
val last_probe = (probes_done | probe_bit) === (meta.clients & ~excluded_client)
val probe_toN = isToN(io.sinkc.bits.param)
if (!params.firstLevel) when (io.sinkc.valid) {
params.ccover( probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_FULL", "Client downgraded to N when asked only to do B")
params.ccover(!probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_HALF", "Client downgraded to B when asked only to do B")
// Caution: the probe matches us only in set.
// We would never allow an outer probe to nest until both w_[rp]probeack complete, so
// it is safe to just unguardedly update the probe FSM.
probes_done := probes_done | probe_bit
probes_toN := probes_toN | Mux(probe_toN, probe_bit, 0.U)
probes_noT := probes_noT || io.sinkc.bits.param =/= TtoT
w_rprobeackfirst := w_rprobeackfirst || last_probe
w_rprobeacklast := w_rprobeacklast || (last_probe && io.sinkc.bits.last)
w_pprobeackfirst := w_pprobeackfirst || last_probe
w_pprobeacklast := w_pprobeacklast || (last_probe && io.sinkc.bits.last)
// Allow wormhole routing from sinkC if the first request beat has offset 0
val set_pprobeack = last_probe && (io.sinkc.bits.last || request.offset === 0.U)
w_pprobeack := w_pprobeack || set_pprobeack
params.ccover(!set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_SERIAL", "Sequential routing of probe response data")
params.ccover( set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_WORMHOLE", "Wormhole routing of probe response data")
// However, meta-data updates need to be done more cautiously
when (meta.state =/= INVALID && io.sinkc.bits.tag === meta.tag && io.sinkc.bits.data) { meta.dirty := true.B } // !!!
}
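  // Worked example for last_probe (illustrative mask values, assuming four
  // probe-capable clients; not taken from this configuration):
  //   meta.clients = b1011, excluded_client = b0001, probes_done = b0010.
  //   A ProbeAck whose probe_bit is b1000 gives
  //   (b0010 | b1000) === (b1011 & ~b0001)  ->  b1010 === b1010, so last_probe is set.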
when (io.sinkd.valid) {
when (io.sinkd.bits.opcode === Grant || io.sinkd.bits.opcode === GrantData) {
sink := io.sinkd.bits.sink
w_grantfirst := true.B
w_grantlast := io.sinkd.bits.last
// Record if we need to prevent taking ownership
bad_grant := io.sinkd.bits.denied
// Allow wormhole routing for requests whose first beat has offset 0
w_grant := request.offset === 0.U || io.sinkd.bits.last
params.ccover(io.sinkd.bits.opcode === GrantData && request.offset === 0.U, "MSHR_GRANT_WORMHOLE", "Wormhole routing of grant response data")
params.ccover(io.sinkd.bits.opcode === GrantData && request.offset =/= 0.U, "MSHR_GRANT_SERIAL", "Sequential routing of grant response data")
gotT := io.sinkd.bits.param === toT
}
.elsewhen (io.sinkd.bits.opcode === ReleaseAck) {
w_releaseack := true.B
}
}
when (io.sinke.valid) {
w_grantack := true.B
}
// Bootstrap new requests
val allocate_as_full = WireInit(new FullRequest(params), init = io.allocate.bits)
val new_meta = Mux(io.allocate.valid && io.allocate.bits.repeat, final_meta_writeback, io.directory.bits)
val new_request = Mux(io.allocate.valid, allocate_as_full, request)
val new_needT = needT(new_request.opcode, new_request.param)
val new_clientBit = params.clientBit(new_request.source)
val new_skipProbe = Mux(skipProbeN(new_request.opcode, params.cache.hintsSkipProbe), new_clientBit, 0.U)
val prior = cacheState(final_meta_writeback, true.B)
  def bypass(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo): Unit = {
if (cover) {
params.ccover(prior === from.code, s"MSHR_${from}_BYPASS", s"State bypass transition from ${from} ${cfg}")
} else {
assert(!(prior === from.code), cf"State bypass from ${from} should be impossible ${cfg}")
}
}
when (io.allocate.valid && io.allocate.bits.repeat) {
bypass(S_INVALID, f || p) // Can lose permissions (probe/flush)
bypass(S_BRANCH, b) // MMIO read to read-only device
bypass(S_BRANCH_C, b && c) // you need children to become C
bypass(S_TIP, true) // MMIO read || clean release can lead to this state
bypass(S_TIP_C, c) // needs two clients || client + mmio || downgrading client
bypass(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client
bypass(S_TIP_D, true) // MMIO write || dirty release lead here
bypass(S_TRUNK_C, c) // acquire for write
bypass(S_TRUNK_CD, c) // dirty release then reacquire
}
when (io.allocate.valid) {
assert (!request_valid || (no_wait && io.schedule.fire))
request_valid := true.B
request := io.allocate.bits
}
// Create execution plan
when (io.directory.valid || (io.allocate.valid && io.allocate.bits.repeat)) {
meta_valid := true.B
meta := new_meta
probes_done := 0.U
probes_toN := 0.U
probes_noT := false.B
gotT := false.B
bad_grant := false.B
// These should already be either true or turning true
// We clear them here explicitly to simplify the mux tree
s_rprobe := true.B
w_rprobeackfirst := true.B
w_rprobeacklast := true.B
s_release := true.B
w_releaseack := true.B
s_pprobe := true.B
s_acquire := true.B
s_flush := true.B
w_grantfirst := true.B
w_grantlast := true.B
w_grant := true.B
w_pprobeackfirst := true.B
w_pprobeacklast := true.B
w_pprobeack := true.B
s_probeack := true.B
s_grantack := true.B
s_execute := true.B
w_grantack := true.B
s_writeback := true.B
// For C channel requests (ie: Release[Data])
when (new_request.prio(2) && (!params.firstLevel).B) {
s_execute := false.B
// Do we need to go dirty?
when (new_request.opcode(0) && !new_meta.dirty) {
s_writeback := false.B
}
// Does our state change?
when (isToB(new_request.param) && new_meta.state === TRUNK) {
s_writeback := false.B
}
// Do our clients change?
when (isToN(new_request.param) && (new_meta.clients & new_clientBit) =/= 0.U) {
s_writeback := false.B
}
assert (new_meta.hit)
}
// For X channel requests (ie: flush)
.elsewhen (new_request.control && params.control.B) { // new_request.prio(0)
s_flush := false.B
// Do we need to actually do something?
when (new_meta.hit) {
s_release := false.B
w_releaseack := false.B
// Do we need to shoot-down inner caches?
when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) {
s_rprobe := false.B
w_rprobeackfirst := false.B
w_rprobeacklast := false.B
}
}
}
// For A channel requests
.otherwise { // new_request.prio(0) && !new_request.control
s_execute := false.B
// Do we need an eviction?
when (!new_meta.hit && new_meta.state =/= INVALID) {
s_release := false.B
w_releaseack := false.B
// Do we need to shoot-down inner caches?
        when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) {
s_rprobe := false.B
w_rprobeackfirst := false.B
w_rprobeacklast := false.B
}
}
// Do we need an acquire?
when (!new_meta.hit || (new_meta.state === BRANCH && new_needT)) {
s_acquire := false.B
w_grantfirst := false.B
w_grantlast := false.B
w_grant := false.B
s_grantack := false.B
s_writeback := false.B
}
// Do we need a probe?
when ((!params.firstLevel).B && (new_meta.hit &&
(new_needT || new_meta.state === TRUNK) &&
(new_meta.clients & ~new_skipProbe) =/= 0.U)) {
s_pprobe := false.B
w_pprobeackfirst := false.B
w_pprobeacklast := false.B
w_pprobeack := false.B
s_writeback := false.B
}
// Do we need a grantack?
when (new_request.opcode === AcquireBlock || new_request.opcode === AcquirePerm) {
w_grantack := false.B
s_writeback := false.B
}
// Becomes dirty?
when (!new_request.opcode(2) && new_meta.hit && !new_meta.dirty) {
s_writeback := false.B
}
}
}
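  // Note on the flag convention used above: a scheduling flag s_* held at false
  // means that step still has to be issued (io.schedule.bits.*.valid is driven
  // from !s_*), and a wait flag w_* held at false means the matching response has
  // not yet been observed (no_wait and the nest/block status bits are derived from
  // them). The block above first sets every flag and then selectively clears the
  // ones this request actually needs, which keeps the update mux tree simple.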
}
File Parameters.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property.cover
import scala.math.{min,max}
case class CacheParameters(
level: Int,
ways: Int,
sets: Int,
blockBytes: Int,
beatBytes: Int, // inner
hintsSkipProbe: Boolean)
{
require (ways > 0)
require (sets > 0)
require (blockBytes > 0 && isPow2(blockBytes))
require (beatBytes > 0 && isPow2(beatBytes))
require (blockBytes >= beatBytes)
val blocks = ways * sets
val sizeBytes = blocks * blockBytes
val blockBeats = blockBytes/beatBytes
}
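// Worked example (illustrative values, not necessarily this design's configuration):
//   CacheParameters(level = 2, ways = 8, sets = 1024, blockBytes = 64, beatBytes = 8,
//                   hintsSkipProbe = false)
//   gives blocks = 8 * 1024 = 8192, sizeBytes = 8192 * 64 bytes = 512 KiB,
//   and blockBeats = 64 / 8 = 8.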
case class InclusiveCachePortParameters(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e))
}
object InclusiveCachePortParameters
{
val none = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.none)
val full = InclusiveCachePortParameters(
a = BufferParams.default,
b = BufferParams.default,
c = BufferParams.default,
d = BufferParams.default,
e = BufferParams.default)
// This removes feed-through paths from C=>A and A=>C
val fullC = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.default,
d = BufferParams.none,
e = BufferParams.none)
val flowAD = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.flow,
e = BufferParams.none)
val flowAE = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.flow)
// For innerBuf:
// SinkA: no restrictions, flows into scheduler+putbuffer
// SourceB: no restrictions, flows out of scheduler
  // SinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore
// SourceD: no restrictions, flows out of bankedStore/regout
// SinkE: no restrictions, flows into scheduler
//
// ... so while none is possible, you probably want at least flowAC to cut ready
// from the scheduler delay and flowD to ease SourceD back-pressure
  // For outerBuf:
// SourceA: must not be pipe, flows out of scheduler
// SinkB: no restrictions, flows into scheduler
// SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored
// SinkD: no restrictions, flows into scheduler & bankedStore
// SourceE: must not be pipe, flows out of scheduler
//
// ... AE take the channel ready into the scheduler, so you need at least flowAE
}
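// Illustrative sketch only (the name below is hypothetical and not defined above):
// following the inner-side advice in the comment, buffering A/C for scheduler ready
// and D for SourceD back-pressure could be written as
//   val flowACD = InclusiveCachePortParameters(
//     a = BufferParams.flow,
//     b = BufferParams.none,
//     c = BufferParams.flow,
//     d = BufferParams.flow,
//     e = BufferParams.none)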
case class InclusiveCacheMicroParameters(
writeBytes: Int, // backing store update granularity
memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz)
portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes
dirReg: Boolean = false,
innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none
outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE
{
require (writeBytes > 0 && isPow2(writeBytes))
require (memCycles > 0)
  require (portFactor >= 2) // for inner RMW and concurrent outer Release + Grant
}
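// Illustrative arithmetic (example values, not this design's configuration): with
// writeBytes = 8, portFactor = 4 and a widest TL port of 32 bytes per beat, the
// formula in the comment above gives numSubBanks = (32 * 4) / 8 = 16.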
case class InclusiveCacheControlParameters(
address: BigInt,
beatBytes: Int,
bankedControl: Boolean)
case class InclusiveCacheParameters(
cache: CacheParameters,
micro: InclusiveCacheMicroParameters,
control: Boolean,
inner: TLEdgeIn,
outer: TLEdgeOut)(implicit val p: Parameters)
{
require (cache.ways > 1)
require (cache.sets > 1 && isPow2(cache.sets))
require (micro.writeBytes <= inner.manager.beatBytes)
require (micro.writeBytes <= outer.manager.beatBytes)
require (inner.manager.beatBytes <= cache.blockBytes)
require (outer.manager.beatBytes <= cache.blockBytes)
// Require that all cached address ranges have contiguous blocks
outer.manager.managers.flatMap(_.address).foreach { a =>
require (a.alignment >= cache.blockBytes)
}
// If we are the first level cache, we do not need to support inner-BCE
val firstLevel = !inner.client.clients.exists(_.supports.probe)
// If we are the last level cache, we do not need to support outer-B
val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED)
require (lastLevel)
// Provision enough resources to achieve full throughput with missing single-beat accesses
val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro)
val secondary = max(mshrs, micro.memCycles - mshrs)
val putLists = micro.memCycles // allow every request to be single beat
val putBeats = max(2*cache.blockBeats, micro.memCycles)
val relLists = 2
val relBeats = relLists*cache.blockBeats
val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address))
val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_))
def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] =
if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail)
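  // Example: bitOffsets(0xB) walks the bits of 1011 and returns List(0, 1, 3),
  // i.e. the positions of the set bits of pickMask, LSB first.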
val addressMapping = bitOffsets(pickMask)
val addressBits = addressMapping.size
// println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}")
val allClients = inner.client.clients.size
val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size
val clientBits = max(1, clientBitsRaw)
val stateBits = 2
val wayBits = log2Ceil(cache.ways)
val setBits = log2Ceil(cache.sets)
val offsetBits = log2Ceil(cache.blockBytes)
val tagBits = addressBits - setBits - offsetBits
val putBits = log2Ceil(max(putLists, relLists))
require (tagBits > 0)
require (offsetBits > 0)
val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1
val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1
val innerMaskBits = inner.manager.beatBytes / micro.writeBytes
val outerMaskBits = outer.manager.beatBytes / micro.writeBytes
def clientBit(source: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse)
}
}
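  // Example (hypothetical source-id ranges, for illustration only): with two
  // probe-capable clients owning sourceIds [0, 32) and [32, 64), clientBit(5.U)
  // is b01 and clientBit(40.U) is b10; bit i corresponds to the i-th such client.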
def clientSource(bit: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U))
}
}
def parseAddress(x: UInt): (UInt, UInt, UInt) = {
val offset = Cat(addressMapping.map(o => x(o,o)).reverse)
val set = offset >> offsetBits
val tag = set >> setBits
(tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0))
}
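  // Worked example (assuming an identity addressMapping, offsetBits = 6 and
  // setBits = 10): parseAddress(0x12345.U) yields offset = 0x05, set = 0x08D,
  // tag = 0x1.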
def widen(x: UInt, width: Int): UInt = {
val y = x | 0.U(width.W)
assert (y >> width === 0.U)
y(width-1, 0)
}
def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = {
val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits))
val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) }
addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) }
Cat(bits.reverse)
}
def restoreAddress(expanded: UInt): UInt = {
val missingBits = flatAddresses
.map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match
.groupBy(_._1)
.view
.mapValues(_.map(_._2))
val muxMask = AddressDecoder(missingBits.values.toList)
val mux = missingBits.toList.map { case (bits, addrs) =>
val widen = addrs.map(_.widen(~muxMask))
val matches = AddressSet
.unify(widen.distinct)
.map(_.contains(expanded))
.reduce(_ || _)
(matches, bits.U)
}
expanded | Mux1H(mux)
}
def dirReg[T <: Data](x: T, en: Bool = true.B): T = {
if (micro.dirReg) RegEnable(x, en) else x
}
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc)
}
object MetaData
{
val stateBits = 2
def INVALID: UInt = 0.U(stateBits.W) // way is empty
def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk
def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk
def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch
// Does a request need trunk?
def needT(opcode: UInt, param: UInt): Bool = {
!opcode(2) ||
(opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) ||
((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB)
}
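  // Examples: Get does not need trunk; Put*, ArithmeticData and LogicalData
  // (opcode(2) clear) do; AcquireBlock/AcquirePerm with NtoB do not, while the
  // *toT acquires and Hint(PREFETCH_WRITE) do.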
// Does a request prove the client need not be probed?
def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = {
// Acquire(toB) and Get => is N, so no probe
// Acquire(*toT) => is N or B, but need T, so no probe
// Hint => could be anything, so probe IS needed, if hintsSkipProbe is enabled, skip probe the same client
// Put* => is N or B, so probe IS needed
opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm || opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B)
}
def isToN(param: UInt): Bool = {
param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN
}
def isToB(param: UInt): Bool = {
param === TLPermissions.TtoB || param === TLPermissions.BtoB
}
}
object InclusiveCacheParameters
{
val lfsrBits = 10
val L2ControlAddress = 0x2010000
val L2ControlSize = 0x1000
def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = {
// We need 2-3 normal MSHRs to cover the Directory latency
// To fully exploit memory bandwidth-delay-product, we need memCyles/blockBeats MSHRs
max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats)
}
def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int =
// We need a dedicated MSHR for B+C each
2 + out_mshrs(cache, micro)
}
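// Worked example (illustrative): with blockBeats = 8 (64-byte blocks, 8-byte beats),
// memCycles = 40 and dirReg = false, out_mshrs = max(2, ceil(40 / 8)) = 5 and
// all_mshrs = 5 + 2 = 7, the extra two being the dedicated B and C MSHRs.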
class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
module MSHR_10( // @[MSHR.scala:84:7]
input clock, // @[MSHR.scala:84:7]
input reset, // @[MSHR.scala:84:7]
input io_allocate_valid, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_1, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_2, // @[MSHR.scala:86:14]
input io_allocate_bits_control, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_opcode, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_param, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_size, // @[MSHR.scala:86:14]
input [7:0] io_allocate_bits_source, // @[MSHR.scala:86:14]
input [12:0] io_allocate_bits_tag, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_offset, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_put, // @[MSHR.scala:86:14]
input [9:0] io_allocate_bits_set, // @[MSHR.scala:86:14]
input io_allocate_bits_repeat, // @[MSHR.scala:86:14]
input io_directory_valid, // @[MSHR.scala:86:14]
input io_directory_bits_dirty, // @[MSHR.scala:86:14]
input [1:0] io_directory_bits_state, // @[MSHR.scala:86:14]
input io_directory_bits_clients, // @[MSHR.scala:86:14]
input [12:0] io_directory_bits_tag, // @[MSHR.scala:86:14]
input io_directory_bits_hit, // @[MSHR.scala:86:14]
input [2:0] io_directory_bits_way, // @[MSHR.scala:86:14]
output io_status_valid, // @[MSHR.scala:86:14]
output [9:0] io_status_bits_set, // @[MSHR.scala:86:14]
output [12:0] io_status_bits_tag, // @[MSHR.scala:86:14]
output [2:0] io_status_bits_way, // @[MSHR.scala:86:14]
output io_status_bits_blockB, // @[MSHR.scala:86:14]
output io_status_bits_nestB, // @[MSHR.scala:86:14]
output io_status_bits_blockC, // @[MSHR.scala:86:14]
output io_status_bits_nestC, // @[MSHR.scala:86:14]
input io_schedule_ready, // @[MSHR.scala:86:14]
output io_schedule_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_a_valid, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_a_bits_tag, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_a_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_a_bits_param, // @[MSHR.scala:86:14]
output io_schedule_bits_a_bits_block, // @[MSHR.scala:86:14]
output io_schedule_bits_b_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_b_bits_param, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_b_bits_tag, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_b_bits_set, // @[MSHR.scala:86:14]
output io_schedule_bits_b_bits_clients, // @[MSHR.scala:86:14]
output io_schedule_bits_c_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_opcode, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_param, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_c_bits_tag, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_c_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_c_bits_dirty, // @[MSHR.scala:86:14]
output io_schedule_bits_d_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_1, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_2, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_control, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_opcode, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_param, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_size, // @[MSHR.scala:86:14]
output [7:0] io_schedule_bits_d_bits_source, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_d_bits_tag, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_offset, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_put, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_d_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_bad, // @[MSHR.scala:86:14]
output io_schedule_bits_e_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_e_bits_sink, // @[MSHR.scala:86:14]
output io_schedule_bits_x_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_valid, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_dir_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_dir_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_bits_data_dirty, // @[MSHR.scala:86:14]
output [1:0] io_schedule_bits_dir_bits_data_state, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_bits_data_clients, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_dir_bits_data_tag, // @[MSHR.scala:86:14]
output io_schedule_bits_reload, // @[MSHR.scala:86:14]
input io_sinkc_valid, // @[MSHR.scala:86:14]
input io_sinkc_bits_last, // @[MSHR.scala:86:14]
input [9:0] io_sinkc_bits_set, // @[MSHR.scala:86:14]
input [12:0] io_sinkc_bits_tag, // @[MSHR.scala:86:14]
input [7:0] io_sinkc_bits_source, // @[MSHR.scala:86:14]
input [2:0] io_sinkc_bits_param, // @[MSHR.scala:86:14]
input io_sinkc_bits_data, // @[MSHR.scala:86:14]
input io_sinkd_valid, // @[MSHR.scala:86:14]
input io_sinkd_bits_last, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_opcode, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_param, // @[MSHR.scala:86:14]
input [3:0] io_sinkd_bits_source, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_sink, // @[MSHR.scala:86:14]
input io_sinkd_bits_denied, // @[MSHR.scala:86:14]
input io_sinke_valid, // @[MSHR.scala:86:14]
input [3:0] io_sinke_bits_sink, // @[MSHR.scala:86:14]
input [9:0] io_nestedwb_set, // @[MSHR.scala:86:14]
input [12:0] io_nestedwb_tag, // @[MSHR.scala:86:14]
input io_nestedwb_b_toN, // @[MSHR.scala:86:14]
input io_nestedwb_b_toB, // @[MSHR.scala:86:14]
input io_nestedwb_b_clr_dirty, // @[MSHR.scala:86:14]
input io_nestedwb_c_set_dirty // @[MSHR.scala:86:14]
);
wire [12:0] final_meta_writeback_tag; // @[MSHR.scala:215:38]
wire final_meta_writeback_clients; // @[MSHR.scala:215:38]
wire [1:0] final_meta_writeback_state; // @[MSHR.scala:215:38]
wire final_meta_writeback_dirty; // @[MSHR.scala:215:38]
wire io_allocate_valid_0 = io_allocate_valid; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_1_0 = io_allocate_bits_prio_1; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_2_0 = io_allocate_bits_prio_2; // @[MSHR.scala:84:7]
wire io_allocate_bits_control_0 = io_allocate_bits_control; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_opcode_0 = io_allocate_bits_opcode; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_param_0 = io_allocate_bits_param; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_size_0 = io_allocate_bits_size; // @[MSHR.scala:84:7]
wire [7:0] io_allocate_bits_source_0 = io_allocate_bits_source; // @[MSHR.scala:84:7]
wire [12:0] io_allocate_bits_tag_0 = io_allocate_bits_tag; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_offset_0 = io_allocate_bits_offset; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_put_0 = io_allocate_bits_put; // @[MSHR.scala:84:7]
wire [9:0] io_allocate_bits_set_0 = io_allocate_bits_set; // @[MSHR.scala:84:7]
wire io_allocate_bits_repeat_0 = io_allocate_bits_repeat; // @[MSHR.scala:84:7]
wire io_directory_valid_0 = io_directory_valid; // @[MSHR.scala:84:7]
wire io_directory_bits_dirty_0 = io_directory_bits_dirty; // @[MSHR.scala:84:7]
wire [1:0] io_directory_bits_state_0 = io_directory_bits_state; // @[MSHR.scala:84:7]
wire io_directory_bits_clients_0 = io_directory_bits_clients; // @[MSHR.scala:84:7]
wire [12:0] io_directory_bits_tag_0 = io_directory_bits_tag; // @[MSHR.scala:84:7]
wire io_directory_bits_hit_0 = io_directory_bits_hit; // @[MSHR.scala:84:7]
wire [2:0] io_directory_bits_way_0 = io_directory_bits_way; // @[MSHR.scala:84:7]
wire io_schedule_ready_0 = io_schedule_ready; // @[MSHR.scala:84:7]
wire io_sinkc_valid_0 = io_sinkc_valid; // @[MSHR.scala:84:7]
wire io_sinkc_bits_last_0 = io_sinkc_bits_last; // @[MSHR.scala:84:7]
wire [9:0] io_sinkc_bits_set_0 = io_sinkc_bits_set; // @[MSHR.scala:84:7]
wire [12:0] io_sinkc_bits_tag_0 = io_sinkc_bits_tag; // @[MSHR.scala:84:7]
wire [7:0] io_sinkc_bits_source_0 = io_sinkc_bits_source; // @[MSHR.scala:84:7]
wire [2:0] io_sinkc_bits_param_0 = io_sinkc_bits_param; // @[MSHR.scala:84:7]
wire io_sinkc_bits_data_0 = io_sinkc_bits_data; // @[MSHR.scala:84:7]
wire io_sinkd_valid_0 = io_sinkd_valid; // @[MSHR.scala:84:7]
wire io_sinkd_bits_last_0 = io_sinkd_bits_last; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_opcode_0 = io_sinkd_bits_opcode; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_param_0 = io_sinkd_bits_param; // @[MSHR.scala:84:7]
wire [3:0] io_sinkd_bits_source_0 = io_sinkd_bits_source; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_sink_0 = io_sinkd_bits_sink; // @[MSHR.scala:84:7]
wire io_sinkd_bits_denied_0 = io_sinkd_bits_denied; // @[MSHR.scala:84:7]
wire io_sinke_valid_0 = io_sinke_valid; // @[MSHR.scala:84:7]
wire [3:0] io_sinke_bits_sink_0 = io_sinke_bits_sink; // @[MSHR.scala:84:7]
wire [9:0] io_nestedwb_set_0 = io_nestedwb_set; // @[MSHR.scala:84:7]
wire [12:0] io_nestedwb_tag_0 = io_nestedwb_tag; // @[MSHR.scala:84:7]
wire io_nestedwb_b_toN_0 = io_nestedwb_b_toN; // @[MSHR.scala:84:7]
wire io_nestedwb_b_toB_0 = io_nestedwb_b_toB; // @[MSHR.scala:84:7]
wire io_nestedwb_b_clr_dirty_0 = io_nestedwb_b_clr_dirty; // @[MSHR.scala:84:7]
wire io_nestedwb_c_set_dirty_0 = io_nestedwb_c_set_dirty; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_0 = 1'h0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_0 = 1'h0; // @[MSHR.scala:84:7]
wire io_schedule_bits_x_bits_fail = 1'h0; // @[MSHR.scala:84:7]
wire _io_schedule_bits_c_valid_T_2 = 1'h0; // @[MSHR.scala:186:68]
wire _io_schedule_bits_c_valid_T_3 = 1'h0; // @[MSHR.scala:186:80]
wire invalid_dirty = 1'h0; // @[MSHR.scala:268:21]
wire invalid_clients = 1'h0; // @[MSHR.scala:268:21]
wire _excluded_client_T = 1'h0; // @[MSHR.scala:279:38]
wire _excluded_client_T_7 = 1'h0; // @[Parameters.scala:279:137]
wire _excluded_client_T_9 = 1'h0; // @[MSHR.scala:279:57]
wire excluded_client = 1'h0; // @[MSHR.scala:279:28]
wire _after_T_4 = 1'h0; // @[MSHR.scala:323:11]
wire allocate_as_full_prio_0 = 1'h0; // @[MSHR.scala:504:34]
wire new_request_prio_0 = 1'h0; // @[MSHR.scala:506:24]
wire _new_skipProbe_T_6 = 1'h0; // @[Parameters.scala:279:137]
wire _prior_T_4 = 1'h0; // @[MSHR.scala:323:11]
wire _io_schedule_bits_b_bits_clients_T = 1'h1; // @[MSHR.scala:289:53]
wire _last_probe_T_1 = 1'h1; // @[MSHR.scala:459:66]
wire [3:0] io_schedule_bits_a_bits_source = 4'h0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_c_bits_source = 4'h0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_d_bits_sink = 4'h0; // @[MSHR.scala:84:7]
wire [12:0] invalid_tag = 13'h0; // @[MSHR.scala:268:21]
wire [1:0] invalid_state = 2'h0; // @[MSHR.scala:268:21]
wire [1:0] _final_meta_writeback_state_T_11 = 2'h1; // @[MSHR.scala:240:70]
wire allocate_as_full_prio_1 = io_allocate_bits_prio_1_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_prio_2 = io_allocate_bits_prio_2_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_control = io_allocate_bits_control_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_opcode = io_allocate_bits_opcode_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_param = io_allocate_bits_param_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_size = io_allocate_bits_size_0; // @[MSHR.scala:84:7, :504:34]
wire [7:0] allocate_as_full_source = io_allocate_bits_source_0; // @[MSHR.scala:84:7, :504:34]
wire [12:0] allocate_as_full_tag = io_allocate_bits_tag_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_offset = io_allocate_bits_offset_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_put = io_allocate_bits_put_0; // @[MSHR.scala:84:7, :504:34]
wire [9:0] allocate_as_full_set = io_allocate_bits_set_0; // @[MSHR.scala:84:7, :504:34]
wire _io_status_bits_blockB_T_8; // @[MSHR.scala:168:40]
wire _io_status_bits_nestB_T_4; // @[MSHR.scala:169:93]
wire _io_status_bits_blockC_T; // @[MSHR.scala:172:28]
wire _io_status_bits_nestC_T_5; // @[MSHR.scala:173:39]
wire _io_schedule_valid_T_5; // @[MSHR.scala:193:105]
wire _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:184:55]
wire _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:283:91]
wire _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:185:41]
wire [2:0] _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:286:41]
wire [12:0] _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:287:41]
wire _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:289:51]
wire _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:186:64]
wire [2:0] _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:290:41]
wire [2:0] _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:291:41]
wire _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:187:57]
wire [2:0] _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:298:41]
wire _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:188:43]
wire _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:189:40]
wire _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:190:66]
wire _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:310:41]
wire [1:0] _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:310:41]
wire _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:310:41]
wire [12:0] _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:310:41]
wire no_wait; // @[MSHR.scala:183:83]
wire [9:0] io_status_bits_set_0; // @[MSHR.scala:84:7]
wire [12:0] io_status_bits_tag_0; // @[MSHR.scala:84:7]
wire [2:0] io_status_bits_way_0; // @[MSHR.scala:84:7]
wire io_status_bits_blockB_0; // @[MSHR.scala:84:7]
wire io_status_bits_nestB_0; // @[MSHR.scala:84:7]
wire io_status_bits_blockC_0; // @[MSHR.scala:84:7]
wire io_status_bits_nestC_0; // @[MSHR.scala:84:7]
wire io_status_valid_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_a_bits_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_a_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_a_bits_param_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_a_bits_block_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_a_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_b_bits_param_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_b_bits_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_b_bits_set_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_b_bits_clients_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_opcode_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_param_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_c_bits_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_c_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_c_bits_dirty_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_1_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_2_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_control_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_opcode_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_param_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_size_0; // @[MSHR.scala:84:7]
wire [7:0] io_schedule_bits_d_bits_source_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_d_bits_tag_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_offset_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_put_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_d_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_bad_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_e_bits_sink_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_bits_data_dirty_0; // @[MSHR.scala:84:7]
wire [1:0] io_schedule_bits_dir_bits_data_state_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_bits_data_clients_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_dir_bits_data_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_dir_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_dir_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_reload_0; // @[MSHR.scala:84:7]
wire io_schedule_valid_0; // @[MSHR.scala:84:7]
reg request_valid; // @[MSHR.scala:97:30]
assign io_status_valid_0 = request_valid; // @[MSHR.scala:84:7, :97:30]
reg request_prio_1; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_1_0 = request_prio_1; // @[MSHR.scala:84:7, :98:20]
reg request_prio_2; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_2_0 = request_prio_2; // @[MSHR.scala:84:7, :98:20]
reg request_control; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_control_0 = request_control; // @[MSHR.scala:84:7, :98:20]
reg [2:0] request_opcode; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_opcode_0 = request_opcode; // @[MSHR.scala:84:7, :98:20]
reg [2:0] request_param; // @[MSHR.scala:98:20]
reg [2:0] request_size; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_size_0 = request_size; // @[MSHR.scala:84:7, :98:20]
reg [7:0] request_source; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_source_0 = request_source; // @[MSHR.scala:84:7, :98:20]
reg [12:0] request_tag; // @[MSHR.scala:98:20]
assign io_status_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_a_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_d_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_offset; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_offset_0 = request_offset; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_put; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_put_0 = request_put; // @[MSHR.scala:84:7, :98:20]
reg [9:0] request_set; // @[MSHR.scala:98:20]
assign io_status_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_a_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_b_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_c_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_d_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_dir_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
reg meta_valid; // @[MSHR.scala:99:27]
reg meta_dirty; // @[MSHR.scala:100:17]
assign io_schedule_bits_c_bits_dirty_0 = meta_dirty; // @[MSHR.scala:84:7, :100:17]
reg [1:0] meta_state; // @[MSHR.scala:100:17]
reg meta_clients; // @[MSHR.scala:100:17]
wire _meta_no_clients_T = meta_clients; // @[MSHR.scala:100:17, :220:39]
assign _io_schedule_bits_b_bits_clients_T_1 = meta_clients; // @[MSHR.scala:100:17, :289:51]
wire evict_c = meta_clients; // @[MSHR.scala:100:17, :315:27]
wire before_c = meta_clients; // @[MSHR.scala:100:17, :315:27]
wire _last_probe_T_2 = meta_clients; // @[MSHR.scala:100:17, :459:64]
reg [12:0] meta_tag; // @[MSHR.scala:100:17]
assign io_schedule_bits_c_bits_tag_0 = meta_tag; // @[MSHR.scala:84:7, :100:17]
reg meta_hit; // @[MSHR.scala:100:17]
reg [2:0] meta_way; // @[MSHR.scala:100:17]
assign io_status_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_c_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_d_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_dir_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
wire [2:0] final_meta_writeback_way = meta_way; // @[MSHR.scala:100:17, :215:38]
reg s_rprobe; // @[MSHR.scala:121:33]
reg w_rprobeackfirst; // @[MSHR.scala:122:33]
reg w_rprobeacklast; // @[MSHR.scala:123:33]
reg s_release; // @[MSHR.scala:124:33]
reg w_releaseack; // @[MSHR.scala:125:33]
reg s_pprobe; // @[MSHR.scala:126:33]
reg s_acquire; // @[MSHR.scala:127:33]
reg s_flush; // @[MSHR.scala:128:33]
reg w_grantfirst; // @[MSHR.scala:129:33]
reg w_grantlast; // @[MSHR.scala:130:33]
reg w_grant; // @[MSHR.scala:131:33]
reg w_pprobeackfirst; // @[MSHR.scala:132:33]
reg w_pprobeacklast; // @[MSHR.scala:133:33]
reg w_pprobeack; // @[MSHR.scala:134:33]
reg s_grantack; // @[MSHR.scala:136:33]
reg s_execute; // @[MSHR.scala:137:33]
reg w_grantack; // @[MSHR.scala:138:33]
reg s_writeback; // @[MSHR.scala:139:33]
reg [2:0] sink; // @[MSHR.scala:147:17]
assign io_schedule_bits_e_bits_sink_0 = sink; // @[MSHR.scala:84:7, :147:17]
reg gotT; // @[MSHR.scala:148:17]
reg bad_grant; // @[MSHR.scala:149:22]
assign io_schedule_bits_d_bits_bad_0 = bad_grant; // @[MSHR.scala:84:7, :149:22]
reg probes_done; // @[MSHR.scala:150:24]
reg probes_toN; // @[MSHR.scala:151:23]
reg probes_noT; // @[MSHR.scala:152:23]
wire _io_status_bits_blockB_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28]
wire _io_status_bits_blockB_T_1 = ~w_releaseack; // @[MSHR.scala:125:33, :168:45]
wire _io_status_bits_blockB_T_2 = ~w_rprobeacklast; // @[MSHR.scala:123:33, :168:62]
wire _io_status_bits_blockB_T_3 = _io_status_bits_blockB_T_1 | _io_status_bits_blockB_T_2; // @[MSHR.scala:168:{45,59,62}]
wire _io_status_bits_blockB_T_4 = ~w_pprobeacklast; // @[MSHR.scala:133:33, :168:82]
wire _io_status_bits_blockB_T_5 = _io_status_bits_blockB_T_3 | _io_status_bits_blockB_T_4; // @[MSHR.scala:168:{59,79,82}]
wire _io_status_bits_blockB_T_6 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103]
wire _io_status_bits_blockB_T_7 = _io_status_bits_blockB_T_5 & _io_status_bits_blockB_T_6; // @[MSHR.scala:168:{79,100,103}]
assign _io_status_bits_blockB_T_8 = _io_status_bits_blockB_T | _io_status_bits_blockB_T_7; // @[MSHR.scala:168:{28,40,100}]
assign io_status_bits_blockB_0 = _io_status_bits_blockB_T_8; // @[MSHR.scala:84:7, :168:40]
wire _io_status_bits_nestB_T = meta_valid & w_releaseack; // @[MSHR.scala:99:27, :125:33, :169:39]
wire _io_status_bits_nestB_T_1 = _io_status_bits_nestB_T & w_rprobeacklast; // @[MSHR.scala:123:33, :169:{39,55}]
wire _io_status_bits_nestB_T_2 = _io_status_bits_nestB_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :169:{55,74}]
wire _io_status_bits_nestB_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :169:96]
assign _io_status_bits_nestB_T_4 = _io_status_bits_nestB_T_2 & _io_status_bits_nestB_T_3; // @[MSHR.scala:169:{74,93,96}]
assign io_status_bits_nestB_0 = _io_status_bits_nestB_T_4; // @[MSHR.scala:84:7, :169:93]
assign _io_status_bits_blockC_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28, :172:28]
assign io_status_bits_blockC_0 = _io_status_bits_blockC_T; // @[MSHR.scala:84:7, :172:28]
wire _io_status_bits_nestC_T = ~w_rprobeackfirst; // @[MSHR.scala:122:33, :173:43]
wire _io_status_bits_nestC_T_1 = ~w_pprobeackfirst; // @[MSHR.scala:132:33, :173:64]
wire _io_status_bits_nestC_T_2 = _io_status_bits_nestC_T | _io_status_bits_nestC_T_1; // @[MSHR.scala:173:{43,61,64}]
wire _io_status_bits_nestC_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :173:85]
wire _io_status_bits_nestC_T_4 = _io_status_bits_nestC_T_2 | _io_status_bits_nestC_T_3; // @[MSHR.scala:173:{61,82,85}]
assign _io_status_bits_nestC_T_5 = meta_valid & _io_status_bits_nestC_T_4; // @[MSHR.scala:99:27, :173:{39,82}]
assign io_status_bits_nestC_0 = _io_status_bits_nestC_T_5; // @[MSHR.scala:84:7, :173:39]
wire _no_wait_T = w_rprobeacklast & w_releaseack; // @[MSHR.scala:123:33, :125:33, :183:33]
wire _no_wait_T_1 = _no_wait_T & w_grantlast; // @[MSHR.scala:130:33, :183:{33,49}]
wire _no_wait_T_2 = _no_wait_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :183:{49,64}]
assign no_wait = _no_wait_T_2 & w_grantack; // @[MSHR.scala:138:33, :183:{64,83}]
assign io_schedule_bits_reload_0 = no_wait; // @[MSHR.scala:84:7, :183:83]
wire _io_schedule_bits_a_valid_T = ~s_acquire; // @[MSHR.scala:127:33, :184:31]
wire _io_schedule_bits_a_valid_T_1 = _io_schedule_bits_a_valid_T & s_release; // @[MSHR.scala:124:33, :184:{31,42}]
assign _io_schedule_bits_a_valid_T_2 = _io_schedule_bits_a_valid_T_1 & s_pprobe; // @[MSHR.scala:126:33, :184:{42,55}]
assign io_schedule_bits_a_valid_0 = _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:84:7, :184:55]
wire _io_schedule_bits_b_valid_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31]
wire _io_schedule_bits_b_valid_T_1 = ~s_pprobe; // @[MSHR.scala:126:33, :185:44]
assign _io_schedule_bits_b_valid_T_2 = _io_schedule_bits_b_valid_T | _io_schedule_bits_b_valid_T_1; // @[MSHR.scala:185:{31,41,44}]
assign io_schedule_bits_b_valid_0 = _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:84:7, :185:41]
wire _io_schedule_bits_c_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32]
wire _io_schedule_bits_c_valid_T_1 = _io_schedule_bits_c_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :186:{32,43}]
assign _io_schedule_bits_c_valid_T_4 = _io_schedule_bits_c_valid_T_1; // @[MSHR.scala:186:{43,64}]
assign io_schedule_bits_c_valid_0 = _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:84:7, :186:64]
wire _io_schedule_bits_d_valid_T = ~s_execute; // @[MSHR.scala:137:33, :187:31]
wire _io_schedule_bits_d_valid_T_1 = _io_schedule_bits_d_valid_T & w_pprobeack; // @[MSHR.scala:134:33, :187:{31,42}]
assign _io_schedule_bits_d_valid_T_2 = _io_schedule_bits_d_valid_T_1 & w_grant; // @[MSHR.scala:131:33, :187:{42,57}]
assign io_schedule_bits_d_valid_0 = _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:84:7, :187:57]
wire _io_schedule_bits_e_valid_T = ~s_grantack; // @[MSHR.scala:136:33, :188:31]
assign _io_schedule_bits_e_valid_T_1 = _io_schedule_bits_e_valid_T & w_grantfirst; // @[MSHR.scala:129:33, :188:{31,43}]
assign io_schedule_bits_e_valid_0 = _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:84:7, :188:43]
wire _io_schedule_bits_x_valid_T = ~s_flush; // @[MSHR.scala:128:33, :189:31]
assign _io_schedule_bits_x_valid_T_1 = _io_schedule_bits_x_valid_T & w_releaseack; // @[MSHR.scala:125:33, :189:{31,40}]
assign io_schedule_bits_x_valid_0 = _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:84:7, :189:40]
wire _io_schedule_bits_dir_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :190:34]
wire _io_schedule_bits_dir_valid_T_1 = _io_schedule_bits_dir_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :190:{34,45}]
wire _io_schedule_bits_dir_valid_T_2 = ~s_writeback; // @[MSHR.scala:139:33, :190:70]
wire _io_schedule_bits_dir_valid_T_3 = _io_schedule_bits_dir_valid_T_2 & no_wait; // @[MSHR.scala:183:83, :190:{70,83}]
assign _io_schedule_bits_dir_valid_T_4 = _io_schedule_bits_dir_valid_T_1 | _io_schedule_bits_dir_valid_T_3; // @[MSHR.scala:190:{45,66,83}]
assign io_schedule_bits_dir_valid_0 = _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:84:7, :190:66]
wire _io_schedule_valid_T = io_schedule_bits_a_valid_0 | io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7, :192:49]
wire _io_schedule_valid_T_1 = _io_schedule_valid_T | io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7, :192:{49,77}]
wire _io_schedule_valid_T_2 = _io_schedule_valid_T_1 | io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7, :192:{77,105}]
wire _io_schedule_valid_T_3 = _io_schedule_valid_T_2 | io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7, :192:105, :193:49]
wire _io_schedule_valid_T_4 = _io_schedule_valid_T_3 | io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7, :193:{49,77}]
assign _io_schedule_valid_T_5 = _io_schedule_valid_T_4 | io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7, :193:{77,105}]
assign io_schedule_valid_0 = _io_schedule_valid_T_5; // @[MSHR.scala:84:7, :193:105]
wire _io_schedule_bits_dir_bits_data_WIRE_dirty = final_meta_writeback_dirty; // @[MSHR.scala:215:38, :310:71]
wire [1:0] _io_schedule_bits_dir_bits_data_WIRE_state = final_meta_writeback_state; // @[MSHR.scala:215:38, :310:71]
wire _io_schedule_bits_dir_bits_data_WIRE_clients = final_meta_writeback_clients; // @[MSHR.scala:215:38, :310:71]
wire after_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27]
wire prior_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27]
wire [12:0] _io_schedule_bits_dir_bits_data_WIRE_tag = final_meta_writeback_tag; // @[MSHR.scala:215:38, :310:71]
wire final_meta_writeback_hit; // @[MSHR.scala:215:38]
wire req_clientBit = request_source == 8'hA0; // @[Parameters.scala:46:9]
wire _req_needT_T = request_opcode[2]; // @[Parameters.scala:269:12]
wire _final_meta_writeback_dirty_T_3 = request_opcode[2]; // @[Parameters.scala:269:12]
wire _req_needT_T_1 = ~_req_needT_T; // @[Parameters.scala:269:{5,12}]
wire _GEN = request_opcode == 3'h5; // @[Parameters.scala:270:13]
wire _req_needT_T_2; // @[Parameters.scala:270:13]
assign _req_needT_T_2 = _GEN; // @[Parameters.scala:270:13]
wire _excluded_client_T_6; // @[Parameters.scala:279:117]
assign _excluded_client_T_6 = _GEN; // @[Parameters.scala:270:13, :279:117]
wire _GEN_0 = request_param == 3'h1; // @[Parameters.scala:270:42]
wire _req_needT_T_3; // @[Parameters.scala:270:42]
assign _req_needT_T_3 = _GEN_0; // @[Parameters.scala:270:42]
wire _final_meta_writeback_clients_T; // @[Parameters.scala:282:11]
assign _final_meta_writeback_clients_T = _GEN_0; // @[Parameters.scala:270:42, :282:11]
wire _io_schedule_bits_d_bits_param_T_7; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_7 = _GEN_0; // @[Parameters.scala:270:42]
wire _req_needT_T_4 = _req_needT_T_2 & _req_needT_T_3; // @[Parameters.scala:270:{13,33,42}]
wire _req_needT_T_5 = _req_needT_T_1 | _req_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33]
wire _GEN_1 = request_opcode == 3'h6; // @[Parameters.scala:271:14]
wire _req_needT_T_6; // @[Parameters.scala:271:14]
assign _req_needT_T_6 = _GEN_1; // @[Parameters.scala:271:14]
wire _req_acquire_T; // @[MSHR.scala:219:36]
assign _req_acquire_T = _GEN_1; // @[Parameters.scala:271:14]
wire _excluded_client_T_1; // @[Parameters.scala:279:12]
assign _excluded_client_T_1 = _GEN_1; // @[Parameters.scala:271:14, :279:12]
wire _req_needT_T_7 = &request_opcode; // @[Parameters.scala:271:52]
wire _req_needT_T_8 = _req_needT_T_6 | _req_needT_T_7; // @[Parameters.scala:271:{14,42,52}]
wire _req_needT_T_9 = |request_param; // @[Parameters.scala:271:89]
wire _req_needT_T_10 = _req_needT_T_8 & _req_needT_T_9; // @[Parameters.scala:271:{42,80,89}]
wire req_needT = _req_needT_T_5 | _req_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80]
wire _req_acquire_T_1 = &request_opcode; // @[Parameters.scala:271:52]
wire req_acquire = _req_acquire_T | _req_acquire_T_1; // @[MSHR.scala:219:{36,53,71}]
wire meta_no_clients = ~_meta_no_clients_T; // @[MSHR.scala:220:{25,39}]
wire _req_promoteT_T = &meta_state; // @[MSHR.scala:100:17, :221:81]
wire _req_promoteT_T_1 = meta_no_clients & _req_promoteT_T; // @[MSHR.scala:220:25, :221:{67,81}]
wire _req_promoteT_T_2 = meta_hit ? _req_promoteT_T_1 : gotT; // @[MSHR.scala:100:17, :148:17, :221:{40,67}]
wire req_promoteT = req_acquire & _req_promoteT_T_2; // @[MSHR.scala:219:53, :221:{34,40}]
wire _final_meta_writeback_dirty_T = request_opcode[0]; // @[MSHR.scala:98:20, :224:65]
wire _final_meta_writeback_dirty_T_1 = meta_dirty | _final_meta_writeback_dirty_T; // @[MSHR.scala:100:17, :224:{48,65}]
wire _final_meta_writeback_state_T = request_param != 3'h3; // @[MSHR.scala:98:20, :225:55]
wire _GEN_2 = meta_state == 2'h2; // @[MSHR.scala:100:17, :225:78]
wire _final_meta_writeback_state_T_1; // @[MSHR.scala:225:78]
assign _final_meta_writeback_state_T_1 = _GEN_2; // @[MSHR.scala:225:78]
wire _final_meta_writeback_state_T_12; // @[MSHR.scala:240:70]
assign _final_meta_writeback_state_T_12 = _GEN_2; // @[MSHR.scala:225:78, :240:70]
wire _evict_T_2; // @[MSHR.scala:317:26]
assign _evict_T_2 = _GEN_2; // @[MSHR.scala:225:78, :317:26]
wire _before_T_1; // @[MSHR.scala:317:26]
assign _before_T_1 = _GEN_2; // @[MSHR.scala:225:78, :317:26]
wire _final_meta_writeback_state_T_2 = _final_meta_writeback_state_T & _final_meta_writeback_state_T_1; // @[MSHR.scala:225:{55,64,78}]
wire [1:0] _final_meta_writeback_state_T_3 = _final_meta_writeback_state_T_2 ? 2'h3 : meta_state; // @[MSHR.scala:100:17, :225:{40,64}]
wire _GEN_3 = request_param == 3'h2; // @[Parameters.scala:282:43]
wire _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:43]
assign _final_meta_writeback_clients_T_1 = _GEN_3; // @[Parameters.scala:282:43]
wire _io_schedule_bits_d_bits_param_T_5; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_5 = _GEN_3; // @[Parameters.scala:282:43]
wire _final_meta_writeback_clients_T_2 = _final_meta_writeback_clients_T | _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:{11,34,43}]
wire _final_meta_writeback_clients_T_3 = request_param == 3'h5; // @[Parameters.scala:282:75]
wire _final_meta_writeback_clients_T_4 = _final_meta_writeback_clients_T_2 | _final_meta_writeback_clients_T_3; // @[Parameters.scala:282:{34,66,75}]
wire _final_meta_writeback_clients_T_5 = _final_meta_writeback_clients_T_4 & req_clientBit; // @[Parameters.scala:46:9]
wire _final_meta_writeback_clients_T_6 = ~_final_meta_writeback_clients_T_5; // @[MSHR.scala:226:{52,56}]
wire _final_meta_writeback_clients_T_7 = meta_clients & _final_meta_writeback_clients_T_6; // @[MSHR.scala:100:17, :226:{50,52}]
wire _final_meta_writeback_clients_T_8 = ~probes_toN; // @[MSHR.scala:151:23, :232:54]
wire _final_meta_writeback_clients_T_9 = meta_clients & _final_meta_writeback_clients_T_8; // @[MSHR.scala:100:17, :232:{52,54}]
wire _final_meta_writeback_dirty_T_2 = meta_hit & meta_dirty; // @[MSHR.scala:100:17, :236:45]
wire _final_meta_writeback_dirty_T_4 = ~_final_meta_writeback_dirty_T_3; // @[MSHR.scala:236:{63,78}]
wire _final_meta_writeback_dirty_T_5 = _final_meta_writeback_dirty_T_2 | _final_meta_writeback_dirty_T_4; // @[MSHR.scala:236:{45,60,63}]
wire [1:0] _GEN_4 = {1'h1, ~req_acquire}; // @[MSHR.scala:219:53, :238:40]
wire [1:0] _final_meta_writeback_state_T_4; // @[MSHR.scala:238:40]
assign _final_meta_writeback_state_T_4 = _GEN_4; // @[MSHR.scala:238:40]
wire [1:0] _final_meta_writeback_state_T_6; // @[MSHR.scala:239:65]
assign _final_meta_writeback_state_T_6 = _GEN_4; // @[MSHR.scala:238:40, :239:65]
wire _final_meta_writeback_state_T_5 = ~meta_hit; // @[MSHR.scala:100:17, :239:41]
wire [1:0] _final_meta_writeback_state_T_7 = gotT ? _final_meta_writeback_state_T_6 : 2'h1; // @[MSHR.scala:148:17, :239:{55,65}]
wire _final_meta_writeback_state_T_8 = meta_no_clients & req_acquire; // @[MSHR.scala:219:53, :220:25, :244:72]
wire [1:0] _final_meta_writeback_state_T_9 = {1'h1, ~_final_meta_writeback_state_T_8}; // @[MSHR.scala:244:{55,72}]
wire _GEN_5 = meta_state == 2'h1; // @[MSHR.scala:100:17, :240:70]
wire _final_meta_writeback_state_T_10; // @[MSHR.scala:240:70]
assign _final_meta_writeback_state_T_10 = _GEN_5; // @[MSHR.scala:240:70]
wire _io_schedule_bits_c_bits_param_T; // @[MSHR.scala:291:53]
assign _io_schedule_bits_c_bits_param_T = _GEN_5; // @[MSHR.scala:240:70, :291:53]
wire _evict_T_1; // @[MSHR.scala:317:26]
assign _evict_T_1 = _GEN_5; // @[MSHR.scala:240:70, :317:26]
wire _before_T; // @[MSHR.scala:317:26]
assign _before_T = _GEN_5; // @[MSHR.scala:240:70, :317:26]
wire [1:0] _final_meta_writeback_state_T_13 = {_final_meta_writeback_state_T_12, 1'h1}; // @[MSHR.scala:240:70]
wire _final_meta_writeback_state_T_14 = &meta_state; // @[MSHR.scala:100:17, :221:81, :240:70]
wire [1:0] _final_meta_writeback_state_T_15 = _final_meta_writeback_state_T_14 ? _final_meta_writeback_state_T_9 : _final_meta_writeback_state_T_13; // @[MSHR.scala:240:70, :244:55]
wire [1:0] _final_meta_writeback_state_T_16 = _final_meta_writeback_state_T_5 ? _final_meta_writeback_state_T_7 : _final_meta_writeback_state_T_15; // @[MSHR.scala:239:{40,41,55}, :240:70]
wire [1:0] _final_meta_writeback_state_T_17 = req_needT ? _final_meta_writeback_state_T_4 : _final_meta_writeback_state_T_16; // @[Parameters.scala:270:70]
wire _final_meta_writeback_clients_T_10 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :245:66]
wire _final_meta_writeback_clients_T_11 = meta_clients & _final_meta_writeback_clients_T_10; // @[MSHR.scala:100:17, :245:{64,66}]
wire _final_meta_writeback_clients_T_12 = meta_hit & _final_meta_writeback_clients_T_11; // @[MSHR.scala:100:17, :245:{40,64}]
wire _final_meta_writeback_clients_T_13 = req_acquire & req_clientBit; // @[Parameters.scala:46:9]
wire _final_meta_writeback_clients_T_14 = _final_meta_writeback_clients_T_12 | _final_meta_writeback_clients_T_13; // @[MSHR.scala:245:{40,84}, :246:40]
assign final_meta_writeback_tag = request_prio_2 | request_control ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :215:38, :223:52, :228:53, :247:30]
wire _final_meta_writeback_clients_T_15 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :258:54]
wire _final_meta_writeback_clients_T_16 = meta_clients & _final_meta_writeback_clients_T_15; // @[MSHR.scala:100:17, :258:{52,54}]
assign final_meta_writeback_hit = bad_grant ? meta_hit : request_prio_2 | ~request_control; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :227:34, :228:53, :234:30, :248:30, :251:20, :252:21]
assign final_meta_writeback_dirty = ~bad_grant & (request_prio_2 ? _final_meta_writeback_dirty_T_1 : request_control ? ~meta_hit & meta_dirty : _final_meta_writeback_dirty_T_5); // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :224:{34,48}, :228:53, :229:21, :230:36, :236:{32,60}, :251:20, :252:21]
assign final_meta_writeback_state = bad_grant ? {1'h0, meta_hit} : request_prio_2 ? _final_meta_writeback_state_T_3 : request_control ? (meta_hit ? 2'h0 : meta_state) : _final_meta_writeback_state_T_17; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :225:{34,40}, :228:53, :229:21, :231:36, :237:{32,38}, :251:20, :252:21, :257:36, :263:36]
assign final_meta_writeback_clients = bad_grant ? meta_hit & _final_meta_writeback_clients_T_16 : request_prio_2 ? _final_meta_writeback_clients_T_7 : request_control ? (meta_hit ? _final_meta_writeback_clients_T_9 : meta_clients) : _final_meta_writeback_clients_T_14; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :226:{34,50}, :228:53, :229:21, :232:{36,52}, :245:{34,84}, :251:20, :252:21, :258:{36,52}, :264:36]
wire _honour_BtoT_T = meta_clients & req_clientBit; // @[Parameters.scala:46:9]
wire _honour_BtoT_T_1 = _honour_BtoT_T; // @[MSHR.scala:276:{47,64}]
wire honour_BtoT = meta_hit & _honour_BtoT_T_1; // @[MSHR.scala:100:17, :276:{30,64}]
wire _excluded_client_T_2 = &request_opcode; // @[Parameters.scala:271:52, :279:50]
wire _excluded_client_T_3 = _excluded_client_T_1 | _excluded_client_T_2; // @[Parameters.scala:279:{12,40,50}]
wire _excluded_client_T_4 = request_opcode == 3'h4; // @[Parameters.scala:279:87]
wire _excluded_client_T_5 = _excluded_client_T_3 | _excluded_client_T_4; // @[Parameters.scala:279:{40,77,87}]
wire _excluded_client_T_8 = _excluded_client_T_5; // @[Parameters.scala:279:{77,106}]
wire [1:0] _io_schedule_bits_a_bits_param_T = meta_hit ? 2'h2 : 2'h1; // @[MSHR.scala:100:17, :282:56]
wire [1:0] _io_schedule_bits_a_bits_param_T_1 = req_needT ? _io_schedule_bits_a_bits_param_T : 2'h0; // @[Parameters.scala:270:70]
assign io_schedule_bits_a_bits_param_0 = {1'h0, _io_schedule_bits_a_bits_param_T_1}; // @[MSHR.scala:84:7, :282:{35,41}]
wire _io_schedule_bits_a_bits_block_T = request_size != 3'h6; // @[MSHR.scala:98:20, :283:51]
wire _io_schedule_bits_a_bits_block_T_1 = request_opcode == 3'h0; // @[MSHR.scala:98:20, :284:55]
wire _io_schedule_bits_a_bits_block_T_2 = &request_opcode; // @[Parameters.scala:271:52]
wire _io_schedule_bits_a_bits_block_T_3 = _io_schedule_bits_a_bits_block_T_1 | _io_schedule_bits_a_bits_block_T_2; // @[MSHR.scala:284:{55,71,89}]
wire _io_schedule_bits_a_bits_block_T_4 = ~_io_schedule_bits_a_bits_block_T_3; // @[MSHR.scala:284:{38,71}]
assign _io_schedule_bits_a_bits_block_T_5 = _io_schedule_bits_a_bits_block_T | _io_schedule_bits_a_bits_block_T_4; // @[MSHR.scala:283:{51,91}, :284:38]
assign io_schedule_bits_a_bits_block_0 = _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:84:7, :283:91]
wire _io_schedule_bits_b_bits_param_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :286:42]
wire [1:0] _io_schedule_bits_b_bits_param_T_1 = req_needT ? 2'h2 : 2'h1; // @[Parameters.scala:270:70]
wire [2:0] _io_schedule_bits_b_bits_param_T_2 = request_prio_1 ? request_param : {1'h0, _io_schedule_bits_b_bits_param_T_1}; // @[MSHR.scala:98:20, :286:{61,97}]
assign _io_schedule_bits_b_bits_param_T_3 = _io_schedule_bits_b_bits_param_T ? 3'h2 : _io_schedule_bits_b_bits_param_T_2; // @[MSHR.scala:286:{41,42,61}]
assign io_schedule_bits_b_bits_param_0 = _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:84:7, :286:41]
wire _io_schedule_bits_b_bits_tag_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :287:42]
assign _io_schedule_bits_b_bits_tag_T_1 = _io_schedule_bits_b_bits_tag_T ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :287:{41,42}]
assign io_schedule_bits_b_bits_tag_0 = _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:84:7, :287:41]
assign io_schedule_bits_b_bits_clients_0 = _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:84:7, :289:51]
assign _io_schedule_bits_c_bits_opcode_T = {2'h3, meta_dirty}; // @[MSHR.scala:100:17, :290:41]
assign io_schedule_bits_c_bits_opcode_0 = _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:84:7, :290:41]
assign _io_schedule_bits_c_bits_param_T_1 = _io_schedule_bits_c_bits_param_T ? 3'h2 : 3'h1; // @[MSHR.scala:291:{41,53}]
assign io_schedule_bits_c_bits_param_0 = _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:84:7, :291:41]
wire _io_schedule_bits_d_bits_param_T = ~req_acquire; // @[MSHR.scala:219:53, :298:42]
wire [1:0] _io_schedule_bits_d_bits_param_T_1 = {1'h0, req_promoteT}; // @[MSHR.scala:221:34, :300:53]
wire [1:0] _io_schedule_bits_d_bits_param_T_2 = honour_BtoT ? 2'h2 : 2'h1; // @[MSHR.scala:276:30, :301:53]
wire _io_schedule_bits_d_bits_param_T_3 = ~(|request_param); // @[Parameters.scala:271:89]
wire [2:0] _io_schedule_bits_d_bits_param_T_4 = _io_schedule_bits_d_bits_param_T_3 ? {1'h0, _io_schedule_bits_d_bits_param_T_1} : request_param; // @[MSHR.scala:98:20, :299:79, :300:53]
wire [2:0] _io_schedule_bits_d_bits_param_T_6 = _io_schedule_bits_d_bits_param_T_5 ? {1'h0, _io_schedule_bits_d_bits_param_T_2} : _io_schedule_bits_d_bits_param_T_4; // @[MSHR.scala:299:79, :301:53]
wire [2:0] _io_schedule_bits_d_bits_param_T_8 = _io_schedule_bits_d_bits_param_T_7 ? 3'h1 : _io_schedule_bits_d_bits_param_T_6; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_9 = _io_schedule_bits_d_bits_param_T ? request_param : _io_schedule_bits_d_bits_param_T_8; // @[MSHR.scala:98:20, :298:{41,42}, :299:79]
assign io_schedule_bits_d_bits_param_0 = _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:84:7, :298:41]
wire _io_schedule_bits_dir_bits_data_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :310:42]
assign _io_schedule_bits_dir_bits_data_T_1_dirty = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_dirty; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_state = _io_schedule_bits_dir_bits_data_T ? 2'h0 : _io_schedule_bits_dir_bits_data_WIRE_state; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_clients = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_clients; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_tag = _io_schedule_bits_dir_bits_data_T ? 13'h0 : _io_schedule_bits_dir_bits_data_WIRE_tag; // @[MSHR.scala:310:{41,42,71}]
assign io_schedule_bits_dir_bits_data_dirty_0 = _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_state_0 = _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_clients_0 = _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_tag_0 = _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:84:7, :310:41]
wire _evict_T = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :338:32]
wire [3:0] evict; // @[MSHR.scala:314:26]
wire _evict_out_T = ~evict_c; // @[MSHR.scala:315:27, :318:32]
wire [1:0] _GEN_6 = {1'h1, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32]
wire [1:0] _evict_out_T_1; // @[MSHR.scala:319:32]
assign _evict_out_T_1 = _GEN_6; // @[MSHR.scala:319:32]
wire [1:0] _before_out_T_1; // @[MSHR.scala:319:32]
assign _before_out_T_1 = _GEN_6; // @[MSHR.scala:319:32]
wire _evict_T_3 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26]
wire [2:0] _GEN_7 = {2'h2, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:39]
wire [2:0] _evict_out_T_2; // @[MSHR.scala:320:39]
assign _evict_out_T_2 = _GEN_7; // @[MSHR.scala:320:39]
wire [2:0] _before_out_T_2; // @[MSHR.scala:320:39]
assign _before_out_T_2 = _GEN_7; // @[MSHR.scala:320:39]
wire [2:0] _GEN_8 = {2'h3, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:76]
wire [2:0] _evict_out_T_3; // @[MSHR.scala:320:76]
assign _evict_out_T_3 = _GEN_8; // @[MSHR.scala:320:76]
wire [2:0] _before_out_T_3; // @[MSHR.scala:320:76]
assign _before_out_T_3 = _GEN_8; // @[MSHR.scala:320:76]
wire [2:0] _evict_out_T_4 = evict_c ? _evict_out_T_2 : _evict_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _evict_T_4 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26]
wire _evict_T_5 = ~_evict_T; // @[MSHR.scala:323:11, :338:32]
assign evict = _evict_T_5 ? 4'h8 : _evict_T_1 ? {3'h0, _evict_out_T} : _evict_T_2 ? {2'h0, _evict_out_T_1} : _evict_T_3 ? {1'h0, _evict_out_T_4} : {_evict_T_4, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}]
wire [3:0] before_0; // @[MSHR.scala:314:26]
wire _before_out_T = ~before_c; // @[MSHR.scala:315:27, :318:32]
wire _before_T_2 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26]
wire [2:0] _before_out_T_4 = before_c ? _before_out_T_2 : _before_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _before_T_3 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26]
wire _before_T_4 = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :323:11]
assign before_0 = _before_T_4 ? 4'h8 : _before_T ? {3'h0, _before_out_T} : _before_T_1 ? {2'h0, _before_out_T_1} : _before_T_2 ? {1'h0, _before_out_T_4} : {_before_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}]
wire [3:0] after; // @[MSHR.scala:314:26]
wire _GEN_9 = final_meta_writeback_state == 2'h1; // @[MSHR.scala:215:38, :317:26]
wire _after_T; // @[MSHR.scala:317:26]
assign _after_T = _GEN_9; // @[MSHR.scala:317:26]
wire _prior_T; // @[MSHR.scala:317:26]
assign _prior_T = _GEN_9; // @[MSHR.scala:317:26]
wire _after_out_T = ~after_c; // @[MSHR.scala:315:27, :318:32]
wire _GEN_10 = final_meta_writeback_state == 2'h2; // @[MSHR.scala:215:38, :317:26]
wire _after_T_1; // @[MSHR.scala:317:26]
assign _after_T_1 = _GEN_10; // @[MSHR.scala:317:26]
wire _prior_T_1; // @[MSHR.scala:317:26]
assign _prior_T_1 = _GEN_10; // @[MSHR.scala:317:26]
wire [1:0] _GEN_11 = {1'h1, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32]
wire [1:0] _after_out_T_1; // @[MSHR.scala:319:32]
assign _after_out_T_1 = _GEN_11; // @[MSHR.scala:319:32]
wire [1:0] _prior_out_T_1; // @[MSHR.scala:319:32]
assign _prior_out_T_1 = _GEN_11; // @[MSHR.scala:319:32]
wire _after_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26]
wire [2:0] _GEN_12 = {2'h2, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:39]
wire [2:0] _after_out_T_2; // @[MSHR.scala:320:39]
assign _after_out_T_2 = _GEN_12; // @[MSHR.scala:320:39]
wire [2:0] _prior_out_T_2; // @[MSHR.scala:320:39]
assign _prior_out_T_2 = _GEN_12; // @[MSHR.scala:320:39]
wire [2:0] _GEN_13 = {2'h3, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:76]
wire [2:0] _after_out_T_3; // @[MSHR.scala:320:76]
assign _after_out_T_3 = _GEN_13; // @[MSHR.scala:320:76]
wire [2:0] _prior_out_T_3; // @[MSHR.scala:320:76]
assign _prior_out_T_3 = _GEN_13; // @[MSHR.scala:320:76]
wire [2:0] _after_out_T_4 = after_c ? _after_out_T_2 : _after_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _GEN_14 = final_meta_writeback_state == 2'h0; // @[MSHR.scala:215:38, :317:26]
wire _after_T_3; // @[MSHR.scala:317:26]
assign _after_T_3 = _GEN_14; // @[MSHR.scala:317:26]
wire _prior_T_3; // @[MSHR.scala:317:26]
assign _prior_T_3 = _GEN_14; // @[MSHR.scala:317:26]
assign after = _after_T ? {3'h0, _after_out_T} : _after_T_1 ? {2'h0, _after_out_T_1} : _after_T_2 ? {1'h0, _after_out_T_4} : {_after_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26]
wire probe_bit = io_sinkc_bits_source_0 == 8'hA0; // @[Parameters.scala:46:9]
wire _GEN_15 = probes_done | probe_bit; // @[Parameters.scala:46:9]
wire _last_probe_T; // @[MSHR.scala:459:33]
assign _last_probe_T = _GEN_15; // @[MSHR.scala:459:33]
wire _probes_done_T; // @[MSHR.scala:467:32]
assign _probes_done_T = _GEN_15; // @[MSHR.scala:459:33, :467:32]
wire last_probe = _last_probe_T == _last_probe_T_2; // @[MSHR.scala:459:{33,46,64}]
wire _probe_toN_T = io_sinkc_bits_param_0 == 3'h1; // @[Parameters.scala:282:11]
wire _probe_toN_T_1 = io_sinkc_bits_param_0 == 3'h2; // @[Parameters.scala:282:43]
wire _probe_toN_T_2 = _probe_toN_T | _probe_toN_T_1; // @[Parameters.scala:282:{11,34,43}]
wire _probe_toN_T_3 = io_sinkc_bits_param_0 == 3'h5; // @[Parameters.scala:282:75]
wire probe_toN = _probe_toN_T_2 | _probe_toN_T_3; // @[Parameters.scala:282:{34,66,75}]
wire _probes_toN_T = probe_toN & probe_bit; // @[Parameters.scala:46:9]
wire _probes_toN_T_1 = probes_toN | _probes_toN_T; // @[MSHR.scala:151:23, :468:{30,35}]
wire _probes_noT_T = io_sinkc_bits_param_0 != 3'h3; // @[MSHR.scala:84:7, :469:53]
wire _probes_noT_T_1 = probes_noT | _probes_noT_T; // @[MSHR.scala:152:23, :469:{30,53}]
wire _w_rprobeackfirst_T = w_rprobeackfirst | last_probe; // @[MSHR.scala:122:33, :459:46, :470:42]
wire _GEN_16 = last_probe & io_sinkc_bits_last_0; // @[MSHR.scala:84:7, :459:46, :471:55]
wire _w_rprobeacklast_T; // @[MSHR.scala:471:55]
assign _w_rprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55]
wire _w_pprobeacklast_T; // @[MSHR.scala:473:55]
assign _w_pprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55, :473:55]
wire _w_rprobeacklast_T_1 = w_rprobeacklast | _w_rprobeacklast_T; // @[MSHR.scala:123:33, :471:{40,55}]
wire _w_pprobeackfirst_T = w_pprobeackfirst | last_probe; // @[MSHR.scala:132:33, :459:46, :472:42]
wire _w_pprobeacklast_T_1 = w_pprobeacklast | _w_pprobeacklast_T; // @[MSHR.scala:133:33, :473:{40,55}]
wire _set_pprobeack_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77]
wire _set_pprobeack_T_1 = io_sinkc_bits_last_0 | _set_pprobeack_T; // @[MSHR.scala:84:7, :475:{59,77}]
wire set_pprobeack = last_probe & _set_pprobeack_T_1; // @[MSHR.scala:459:46, :475:{36,59}]
wire _w_pprobeack_T = w_pprobeack | set_pprobeack; // @[MSHR.scala:134:33, :475:36, :476:32]
wire _w_grant_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77, :490:33]
wire _w_grant_T_1 = _w_grant_T | io_sinkd_bits_last_0; // @[MSHR.scala:84:7, :490:{33,41}]
wire _gotT_T = io_sinkd_bits_param_0 == 3'h0; // @[MSHR.scala:84:7, :493:35]
wire _new_meta_T = io_allocate_valid_0 & io_allocate_bits_repeat_0; // @[MSHR.scala:84:7, :505:40]
wire new_meta_dirty = _new_meta_T ? final_meta_writeback_dirty : io_directory_bits_dirty_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [1:0] new_meta_state = _new_meta_T ? final_meta_writeback_state : io_directory_bits_state_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_meta_clients = _new_meta_T ? final_meta_writeback_clients : io_directory_bits_clients_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [12:0] new_meta_tag = _new_meta_T ? final_meta_writeback_tag : io_directory_bits_tag_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_meta_hit = _new_meta_T ? final_meta_writeback_hit : io_directory_bits_hit_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [2:0] new_meta_way = _new_meta_T ? final_meta_writeback_way : io_directory_bits_way_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_request_prio_1 = io_allocate_valid_0 ? allocate_as_full_prio_1 : request_prio_1; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_prio_2 = io_allocate_valid_0 ? allocate_as_full_prio_2 : request_prio_2; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_control = io_allocate_valid_0 ? allocate_as_full_control : request_control; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_opcode = io_allocate_valid_0 ? allocate_as_full_opcode : request_opcode; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_param = io_allocate_valid_0 ? allocate_as_full_param : request_param; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_size = io_allocate_valid_0 ? allocate_as_full_size : request_size; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [7:0] new_request_source = io_allocate_valid_0 ? allocate_as_full_source : request_source; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [12:0] new_request_tag = io_allocate_valid_0 ? allocate_as_full_tag : request_tag; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_offset = io_allocate_valid_0 ? allocate_as_full_offset : request_offset; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_put = io_allocate_valid_0 ? allocate_as_full_put : request_put; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [9:0] new_request_set = io_allocate_valid_0 ? allocate_as_full_set : request_set; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire _new_needT_T = new_request_opcode[2]; // @[Parameters.scala:269:12]
wire _new_needT_T_1 = ~_new_needT_T; // @[Parameters.scala:269:{5,12}]
wire _GEN_17 = new_request_opcode == 3'h5; // @[Parameters.scala:270:13]
wire _new_needT_T_2; // @[Parameters.scala:270:13]
assign _new_needT_T_2 = _GEN_17; // @[Parameters.scala:270:13]
wire _new_skipProbe_T_5; // @[Parameters.scala:279:117]
assign _new_skipProbe_T_5 = _GEN_17; // @[Parameters.scala:270:13, :279:117]
wire _new_needT_T_3 = new_request_param == 3'h1; // @[Parameters.scala:270:42]
wire _new_needT_T_4 = _new_needT_T_2 & _new_needT_T_3; // @[Parameters.scala:270:{13,33,42}]
wire _new_needT_T_5 = _new_needT_T_1 | _new_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33]
wire _T_615 = new_request_opcode == 3'h6; // @[Parameters.scala:271:14]
wire _new_needT_T_6; // @[Parameters.scala:271:14]
assign _new_needT_T_6 = _T_615; // @[Parameters.scala:271:14]
wire _new_skipProbe_T; // @[Parameters.scala:279:12]
assign _new_skipProbe_T = _T_615; // @[Parameters.scala:271:14, :279:12]
wire _new_needT_T_7 = &new_request_opcode; // @[Parameters.scala:271:52]
wire _new_needT_T_8 = _new_needT_T_6 | _new_needT_T_7; // @[Parameters.scala:271:{14,42,52}]
wire _new_needT_T_9 = |new_request_param; // @[Parameters.scala:271:89]
wire _new_needT_T_10 = _new_needT_T_8 & _new_needT_T_9; // @[Parameters.scala:271:{42,80,89}]
wire new_needT = _new_needT_T_5 | _new_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80]
wire new_clientBit = new_request_source == 8'hA0; // @[Parameters.scala:46:9]
wire _new_skipProbe_T_1 = &new_request_opcode; // @[Parameters.scala:271:52, :279:50]
wire _new_skipProbe_T_2 = _new_skipProbe_T | _new_skipProbe_T_1; // @[Parameters.scala:279:{12,40,50}]
wire _new_skipProbe_T_3 = new_request_opcode == 3'h4; // @[Parameters.scala:279:87]
wire _new_skipProbe_T_4 = _new_skipProbe_T_2 | _new_skipProbe_T_3; // @[Parameters.scala:279:{40,77,87}]
wire _new_skipProbe_T_7 = _new_skipProbe_T_4; // @[Parameters.scala:279:{77,106}]
wire new_skipProbe = _new_skipProbe_T_7 & new_clientBit; // @[Parameters.scala:46:9]
wire [3:0] prior; // @[MSHR.scala:314:26]
wire _prior_out_T = ~prior_c; // @[MSHR.scala:315:27, :318:32]
wire _prior_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26]
wire [2:0] _prior_out_T_4 = prior_c ? _prior_out_T_2 : _prior_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
assign prior = _prior_T ? {3'h0, _prior_out_T} : _prior_T_1 ? {2'h0, _prior_out_T_1} : _prior_T_2 ? {1'h0, _prior_out_T_4} : {_prior_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26]
wire _T_574 = io_directory_valid_0 | _new_meta_T; // @[MSHR.scala:84:7, :505:40, :539:28] |
Generate the Verilog code corresponding to the following Chisel files.
File InputUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{FlowRoutingBundle}
import constellation.noc.{HasNoCParams}
class AbstractInputUnitIO(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams],
)(implicit val p: Parameters) extends Bundle with HasRouterOutputParams {
val nodeId = cParam.destId
val router_req = Decoupled(new RouteComputerReq)
val router_resp = Input(new RouteComputerResp(outParams, egressParams))
val vcalloc_req = Decoupled(new VCAllocReq(cParam, outParams, egressParams))
val vcalloc_resp = Input(new VCAllocResp(outParams, egressParams))
val out_credit_available = Input(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) }))
val salloc_req = Vec(cParam.destSpeedup, Decoupled(new SwitchAllocReq(outParams, egressParams)))
val out = Vec(cParam.destSpeedup, Valid(new SwitchBundle(outParams, egressParams)))
val debug = Output(new Bundle {
val va_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
val sa_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
})
val block = Input(Bool())
}
abstract class AbstractInputUnit(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module with HasRouterOutputParams with HasNoCParams {
val nodeId = cParam.destId
def io: AbstractInputUnitIO
}
class InputBuffer(cParam: ChannelParams)(implicit p: Parameters) extends Module {
val nVirtualChannels = cParam.nVirtualChannels
val io = IO(new Bundle {
val enq = Flipped(Vec(cParam.srcSpeedup, Valid(new Flit(cParam.payloadBits))))
val deq = Vec(cParam.nVirtualChannels, Decoupled(new BaseFlit(cParam.payloadBits)))
})
val useOutputQueues = cParam.useOutputQueues
val delims = if (useOutputQueues) {
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize else 0).scanLeft(0)(_+_)
} else {
// If no output queuing is used, we have to add one extra slot per VC, since head == tail is interpreted as empty (otherwise a full region could not be distinguished from an empty one)
// TODO this should be fixed, should use all slots available
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize + 1 else 0).scanLeft(0)(_+_)
}
val starts = delims.dropRight(1).zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val ends = delims.tail.zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val fullSize = delims.last
// Ugly case. Use multiple queues
if ((cParam.srcSpeedup > 1 || cParam.destSpeedup > 1 || fullSize <= 1) || !cParam.unifiedBuffer) {
require(useOutputQueues)
val qs = cParam.virtualChannelParams.map(v => Module(new Queue(new BaseFlit(cParam.payloadBits), v.bufferSize)))
qs.zipWithIndex.foreach { case (q,i) =>
val sel = io.enq.map(f => f.valid && f.bits.virt_channel_id === i.U)
q.io.enq.valid := sel.orR
q.io.enq.bits.head := Mux1H(sel, io.enq.map(_.bits.head))
q.io.enq.bits.tail := Mux1H(sel, io.enq.map(_.bits.tail))
q.io.enq.bits.payload := Mux1H(sel, io.enq.map(_.bits.payload))
io.deq(i) <> q.io.deq
}
} else {
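// Unified buffer: a single Mem is partitioned into per-VC circular regions
// (bounded by starts/ends) with per-VC head/tail pointers; head == tail means
// that VC's region is empty. The one-entry queues (qs) optionally stage flits
// between the memory and the per-VC dequeue ports.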
val mem = Mem(fullSize, new BaseFlit(cParam.payloadBits))
val heads = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val tails = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val empty = (heads zip tails).map(t => t._1 === t._2)
val qs = Seq.fill(nVirtualChannels) { Module(new Queue(new BaseFlit(cParam.payloadBits), 1, pipe=true)) }
qs.foreach(_.io.enq.valid := false.B)
qs.foreach(_.io.enq.bits := DontCare)
val vc_sel = UIntToOH(io.enq(0).bits.virt_channel_id)
val flit = Wire(new BaseFlit(cParam.payloadBits))
val direct_to_q = (Mux1H(vc_sel, qs.map(_.io.enq.ready)) && Mux1H(vc_sel, empty)) && useOutputQueues.B
flit.head := io.enq(0).bits.head
flit.tail := io.enq(0).bits.tail
flit.payload := io.enq(0).bits.payload
when (io.enq(0).valid && !direct_to_q) {
val tail = tails(io.enq(0).bits.virt_channel_id)
mem.write(tail, flit)
tails(io.enq(0).bits.virt_channel_id) := Mux(
tail === Mux1H(vc_sel, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(vc_sel, starts.map(_.U)),
tail + 1.U)
} .elsewhen (io.enq(0).valid && direct_to_q) {
for (i <- 0 until nVirtualChannels) {
when (io.enq(0).bits.virt_channel_id === i.U) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := flit
}
}
}
if (useOutputQueues) {
val can_to_q = (0 until nVirtualChannels).map { i => !empty(i) && qs(i).io.enq.ready }
val to_q_oh = PriorityEncoderOH(can_to_q)
val to_q = OHToUInt(to_q_oh)
when (can_to_q.orR) {
val head = Mux1H(to_q_oh, heads)
heads(to_q) := Mux(
head === Mux1H(to_q_oh, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(to_q_oh, starts.map(_.U)),
head + 1.U)
for (i <- 0 until nVirtualChannels) {
when (to_q_oh(i)) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := mem.read(head)
}
}
}
for (i <- 0 until nVirtualChannels) {
io.deq(i) <> qs(i).io.deq
}
} else {
qs.map(_.io.deq.ready := false.B)
val ready_sel = io.deq.map(_.ready)
val fire = io.deq.map(_.fire)
assert(PopCount(fire) <= 1.U)
val head = Mux1H(fire, heads)
when (fire.orR) {
val fire_idx = OHToUInt(fire)
heads(fire_idx) := Mux(
head === Mux1H(fire, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(fire, starts.map(_.U)),
head + 1.U)
}
val read_flit = mem.read(head)
for (i <- 0 until nVirtualChannels) {
io.deq(i).valid := !empty(i)
io.deq(i).bits := read_flit
}
}
}
}
class InputUnit(cParam: ChannelParams, outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean, combineSAST: Boolean
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
val nVirtualChannels = cParam.nVirtualChannels
val virtualChannelParams = cParam.virtualChannelParams
class InputUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(new Channel(cParam.asInstanceOf[ChannelParams]))
}
val io = IO(new InputUnitIO)
val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5)
class InputState extends Bundle {
val g = UInt(3.W)
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
val flow = new FlowRoutingBundle
val fifo_deps = UInt(nVirtualChannels.W)
}
val input_buffer = Module(new InputBuffer(cParam))
for (i <- 0 until cParam.srcSpeedup) {
input_buffer.io.enq(i) := io.in.flit(i)
}
input_buffer.io.deq.foreach(_.ready := false.B)
val route_arbiter = Module(new Arbiter(
new RouteComputerReq, nVirtualChannels
))
io.router_req <> route_arbiter.io.out
val states = Reg(Vec(nVirtualChannels, new InputState))
val anyFifo = cParam.possibleFlows.map(_.fifo).reduce(_||_)
val allFifo = cParam.possibleFlows.map(_.fifo).reduce(_&&_)
if (anyFifo) {
val idle_mask = VecInit(states.map(_.g === g_i)).asUInt
for (s <- states)
for (i <- 0 until nVirtualChannels)
s.fifo_deps := s.fifo_deps & ~idle_mask
}
for (i <- 0 until cParam.srcSpeedup) {
when (io.in.flit(i).fire && io.in.flit(i).bits.head) {
val id = io.in.flit(i).bits.virt_channel_id
assert(id < nVirtualChannels.U)
assert(states(id).g === g_i)
val at_dest = io.in.flit(i).bits.flow.egress_node === nodeId.U
states(id).g := Mux(at_dest, g_v, g_r)
states(id).vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (o.U === io.in.flit(i).bits.flow.egress_node_id) {
states(id).vc_sel(o+nOutputs)(0) := true.B
}
}
states(id).flow := io.in.flit(i).bits.flow
if (anyFifo) {
val fifo = cParam.possibleFlows.filter(_.fifo).map(_.isFlow(io.in.flit(i).bits.flow)).toSeq.orR
states(id).fifo_deps := VecInit(states.zipWithIndex.map { case (s, j) =>
s.g =/= g_i && s.flow.asUInt === io.in.flit(i).bits.flow.asUInt && j.U =/= id
}).asUInt
}
}
}
(route_arbiter.io.in zip states).zipWithIndex.map { case ((i,s),idx) =>
if (virtualChannelParams(idx).traversable) {
i.valid := s.g === g_r
i.bits.flow := s.flow
i.bits.src_virt_id := idx.U
when (i.fire) { s.g := g_v }
} else {
i.valid := false.B
i.bits := DontCare
}
}
when (io.router_req.fire) {
val id = io.router_req.bits.src_virt_id
assert(states(id).g === g_r)
states(id).g := g_v
for (i <- 0 until nVirtualChannels) {
when (i.U === id) {
states(i).vc_sel := io.router_resp.vc_sel
}
}
}
val mask = RegInit(0.U(nVirtualChannels.W))
val vcalloc_reqs = Wire(Vec(nVirtualChannels, new VCAllocReq(cParam, outParams, egressParams)))
val vcalloc_vals = Wire(Vec(nVirtualChannels, Bool()))
val vcalloc_filter = PriorityEncoderOH(Cat(vcalloc_vals.asUInt, vcalloc_vals.asUInt & ~mask))
val vcalloc_sel = vcalloc_filter(nVirtualChannels-1,0) | (vcalloc_filter >> nVirtualChannels)
// Prioritize incoming packets
when (io.router_req.fire) {
mask := (1.U << io.router_req.bits.src_virt_id) - 1.U
} .elsewhen (vcalloc_vals.orR) {
mask := Mux1H(vcalloc_sel, (0 until nVirtualChannels).map { w => ~(0.U((w+1).W)) })
}
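// Doubling the request vector as {vals, vals & ~mask} and taking the lowest
// set bit implements a rotating priority: VCs above the most recently granted
// (or most recently routed) index are considered first, with the unmasked
// copy serving as a fallback when none of them are requesting.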
io.vcalloc_req.valid := vcalloc_vals.orR
io.vcalloc_req.bits := Mux1H(vcalloc_sel, vcalloc_reqs)
states.zipWithIndex.map { case (s,idx) =>
if (virtualChannelParams(idx).traversable) {
vcalloc_vals(idx) := s.g === g_v && s.fifo_deps === 0.U
vcalloc_reqs(idx).in_vc := idx.U
vcalloc_reqs(idx).vc_sel := s.vc_sel
vcalloc_reqs(idx).flow := s.flow
when (vcalloc_vals(idx) && vcalloc_sel(idx) && io.vcalloc_req.ready) { s.g := g_a }
if (combineRCVA) {
when (route_arbiter.io.in(idx).fire) {
vcalloc_vals(idx) := true.B
vcalloc_reqs(idx).vc_sel := io.router_resp.vc_sel
}
}
} else {
vcalloc_vals(idx) := false.B
vcalloc_reqs(idx) := DontCare
}
}
io.debug.va_stall := PopCount(vcalloc_vals) - io.vcalloc_req.ready
when (io.vcalloc_req.fire) {
for (i <- 0 until nVirtualChannels) {
when (vcalloc_sel(i)) {
states(i).vc_sel := io.vcalloc_resp.vc_sel
states(i).g := g_a
if (!combineRCVA) {
assert(states(i).g === g_v)
}
}
}
}
val salloc_arb = Module(new SwitchArbiter(
nVirtualChannels,
cParam.destSpeedup,
outParams, egressParams
))
(states zip salloc_arb.io.in).zipWithIndex.map { case ((s,r),i) =>
if (virtualChannelParams(i).traversable) {
val credit_available = (s.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
r.valid := s.g === g_a && credit_available && input_buffer.io.deq(i).valid
r.bits.vc_sel := s.vc_sel
val deq_tail = input_buffer.io.deq(i).bits.tail
r.bits.tail := deq_tail
when (r.fire && deq_tail) {
s.g := g_i
}
input_buffer.io.deq(i).ready := r.ready
} else {
r.valid := false.B
r.bits := DontCare
}
}
io.debug.sa_stall := PopCount(salloc_arb.io.in.map(r => r.valid && !r.ready))
io.salloc_req <> salloc_arb.io.out
when (io.block) {
salloc_arb.io.out.foreach(_.ready := false.B)
io.salloc_req.foreach(_.valid := false.B)
}
class OutBundle extends Bundle {
val valid = Bool()
val vid = UInt(virtualChannelBits.W)
val out_vid = UInt(log2Up(allOutParams.map(_.nVirtualChannels).max).W)
val flit = new Flit(cParam.payloadBits)
}
val salloc_outs = if (combineSAST) {
Wire(Vec(cParam.destSpeedup, new OutBundle))
} else {
Reg(Vec(cParam.destSpeedup, new OutBundle))
}
io.in.credit_return := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire, salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
io.in.vc_free := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire && Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)),
salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
for (i <- 0 until cParam.destSpeedup) {
val salloc_out = salloc_outs(i)
salloc_out.valid := salloc_arb.io.out(i).fire
salloc_out.vid := OHToUInt(salloc_arb.io.chosen_oh(i))
val vc_sel = Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.vc_sel))
val channel_oh = vc_sel.map(_.reduce(_||_)).toSeq
val virt_channel = Mux1H(channel_oh, vc_sel.map(v => OHToUInt(v)).toSeq)
when (salloc_arb.io.out(i).fire) {
salloc_out.out_vid := virt_channel
salloc_out.flit.payload := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.payload))
salloc_out.flit.head := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.head))
salloc_out.flit.tail := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail))
salloc_out.flit.flow := Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.flow))
} .otherwise {
salloc_out.out_vid := DontCare
salloc_out.flit := DontCare
}
salloc_out.flit.virt_channel_id := DontCare // this gets set in the switch
io.out(i).valid := salloc_out.valid
io.out(i).bits.flit := salloc_out.flit
io.out(i).bits.out_virt_channel := salloc_out.out_vid
}
def filterVCSel(sel: MixedVec[Vec[Bool]], srcV: Int) = {
if (virtualChannelParams(srcV).traversable) {
outParams.zipWithIndex.map { case (oP, oI) =>
(0 until oP.nVirtualChannels).map { oV =>
var allow = false
virtualChannelParams(srcV).possibleFlows.foreach { pI =>
allow = allow || routingRelation(
cParam.channelRoutingInfos(srcV),
oP.channelRoutingInfos(oV),
pI
)
}
if (!allow)
sel(oI)(oV) := false.B
}
}
}
}
(0 until nVirtualChannels).map { i =>
if (!virtualChannelParams(i).traversable) states(i) := DontCare
filterVCSel(states(i).vc_sel, i)
}
when (reset.asBool) {
states.foreach(_.g := g_i)
}
}
| module InputUnit_49( // @[InputUnit.scala:158:7]
input clock, // @[InputUnit.scala:158:7]
input reset, // @[InputUnit.scala:158:7]
output [4:0] io_router_req_bits_src_virt_id, // @[InputUnit.scala:170:14]
output [3:0] io_router_req_bits_flow_vnet_id, // @[InputUnit.scala:170:14]
output [5:0] io_router_req_bits_flow_ingress_node, // @[InputUnit.scala:170:14]
output [2:0] io_router_req_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14]
output [5:0] io_router_req_bits_flow_egress_node, // @[InputUnit.scala:170:14]
output [2:0] io_router_req_bits_flow_egress_node_id, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_12, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_13, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_16, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_17, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_20, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_21, // @[InputUnit.scala:170:14]
input io_vcalloc_req_ready, // @[InputUnit.scala:170:14]
output io_vcalloc_req_valid, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_12, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_13, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_16, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_17, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_20, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_21, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_12, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_13, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_16, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_17, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_20, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_21, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_10, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_11, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_14, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_15, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_18, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_19, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_20, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_21, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_12, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_13, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_16, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_17, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_20, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_21, // @[InputUnit.scala:170:14]
input io_salloc_req_0_ready, // @[InputUnit.scala:170:14]
output io_salloc_req_0_valid, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_2, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_3, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_8, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_9, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_10, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_11, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_12, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_13, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_14, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_15, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_16, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_17, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_18, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_19, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_20, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_21, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_2, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_3, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_8, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_9, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_10, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_11, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_12, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_13, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_14, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_15, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_16, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_17, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_18, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_19, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_20, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_21, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_tail, // @[InputUnit.scala:170:14]
output io_out_0_valid, // @[InputUnit.scala:170:14]
output io_out_0_bits_flit_head, // @[InputUnit.scala:170:14]
output io_out_0_bits_flit_tail, // @[InputUnit.scala:170:14]
output [72:0] io_out_0_bits_flit_payload, // @[InputUnit.scala:170:14]
output [3:0] io_out_0_bits_flit_flow_vnet_id, // @[InputUnit.scala:170:14]
output [5:0] io_out_0_bits_flit_flow_ingress_node, // @[InputUnit.scala:170:14]
output [2:0] io_out_0_bits_flit_flow_ingress_node_id, // @[InputUnit.scala:170:14]
output [5:0] io_out_0_bits_flit_flow_egress_node, // @[InputUnit.scala:170:14]
output [2:0] io_out_0_bits_flit_flow_egress_node_id, // @[InputUnit.scala:170:14]
output [4:0] io_out_0_bits_out_virt_channel, // @[InputUnit.scala:170:14]
output [4:0] io_debug_va_stall, // @[InputUnit.scala:170:14]
output [4:0] io_debug_sa_stall, // @[InputUnit.scala:170:14]
input io_in_flit_0_valid, // @[InputUnit.scala:170:14]
input io_in_flit_0_bits_head, // @[InputUnit.scala:170:14]
input io_in_flit_0_bits_tail, // @[InputUnit.scala:170:14]
input [72:0] io_in_flit_0_bits_payload, // @[InputUnit.scala:170:14]
input [3:0] io_in_flit_0_bits_flow_vnet_id, // @[InputUnit.scala:170:14]
input [5:0] io_in_flit_0_bits_flow_ingress_node, // @[InputUnit.scala:170:14]
input [2:0] io_in_flit_0_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14]
input [5:0] io_in_flit_0_bits_flow_egress_node, // @[InputUnit.scala:170:14]
input [2:0] io_in_flit_0_bits_flow_egress_node_id, // @[InputUnit.scala:170:14]
input [4:0] io_in_flit_0_bits_virt_channel_id, // @[InputUnit.scala:170:14]
output [21:0] io_in_credit_return, // @[InputUnit.scala:170:14]
output [21:0] io_in_vc_free // @[InputUnit.scala:170:14]
);
wire vcalloc_vals_21; // @[InputUnit.scala:266:32]
wire vcalloc_vals_20; // @[InputUnit.scala:266:32]
wire vcalloc_vals_17; // @[InputUnit.scala:266:32]
wire vcalloc_vals_16; // @[InputUnit.scala:266:32]
wire vcalloc_vals_13; // @[InputUnit.scala:266:32]
wire vcalloc_vals_12; // @[InputUnit.scala:266:32]
wire _salloc_arb_io_in_12_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_13_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_16_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_17_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_20_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_21_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_out_0_valid; // @[InputUnit.scala:296:26]
wire [21:0] _salloc_arb_io_chosen_oh_0; // @[InputUnit.scala:296:26]
wire _route_arbiter_io_in_12_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_13_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_16_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_17_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_20_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_21_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_out_valid; // @[InputUnit.scala:187:29]
wire [4:0] _route_arbiter_io_out_bits_src_virt_id; // @[InputUnit.scala:187:29]
wire _input_buffer_io_deq_0_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_0_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_0_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_1_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_2_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_3_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_3_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_3_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_4_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_4_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_4_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_5_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_5_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_5_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_6_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_6_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_6_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_7_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_7_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_7_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_8_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_8_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_8_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_9_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_9_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_9_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_10_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_10_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_10_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_11_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_11_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_11_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_12_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_12_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_12_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_12_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_13_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_13_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_13_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_13_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_14_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_14_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_14_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_15_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_15_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_15_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_16_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_16_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_16_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_16_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_17_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_17_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_17_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_17_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_18_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_18_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_18_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_19_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_19_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_19_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_20_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_20_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_20_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_20_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_21_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_21_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_21_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_21_bits_payload; // @[InputUnit.scala:181:28]
reg [2:0] states_12_g; // @[InputUnit.scala:192:19]
reg states_12_vc_sel_0_12; // @[InputUnit.scala:192:19]
reg states_12_vc_sel_0_13; // @[InputUnit.scala:192:19]
reg states_12_vc_sel_0_20; // @[InputUnit.scala:192:19]
reg states_12_vc_sel_0_21; // @[InputUnit.scala:192:19]
reg [3:0] states_12_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [5:0] states_12_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_12_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [5:0] states_12_flow_egress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_12_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_13_g; // @[InputUnit.scala:192:19]
reg states_13_vc_sel_0_12; // @[InputUnit.scala:192:19]
reg states_13_vc_sel_0_13; // @[InputUnit.scala:192:19]
reg states_13_vc_sel_0_20; // @[InputUnit.scala:192:19]
reg states_13_vc_sel_0_21; // @[InputUnit.scala:192:19]
reg [3:0] states_13_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [5:0] states_13_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_13_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [5:0] states_13_flow_egress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_13_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_16_g; // @[InputUnit.scala:192:19]
reg states_16_vc_sel_0_16; // @[InputUnit.scala:192:19]
reg states_16_vc_sel_0_17; // @[InputUnit.scala:192:19]
reg states_16_vc_sel_0_20; // @[InputUnit.scala:192:19]
reg states_16_vc_sel_0_21; // @[InputUnit.scala:192:19]
reg [3:0] states_16_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [5:0] states_16_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_16_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [5:0] states_16_flow_egress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_16_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_17_g; // @[InputUnit.scala:192:19]
reg states_17_vc_sel_0_16; // @[InputUnit.scala:192:19]
reg states_17_vc_sel_0_17; // @[InputUnit.scala:192:19]
reg states_17_vc_sel_0_20; // @[InputUnit.scala:192:19]
reg states_17_vc_sel_0_21; // @[InputUnit.scala:192:19]
reg [3:0] states_17_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [5:0] states_17_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_17_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [5:0] states_17_flow_egress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_17_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_20_g; // @[InputUnit.scala:192:19]
reg states_20_vc_sel_0_12; // @[InputUnit.scala:192:19]
reg states_20_vc_sel_0_13; // @[InputUnit.scala:192:19]
reg states_20_vc_sel_0_16; // @[InputUnit.scala:192:19]
reg states_20_vc_sel_0_17; // @[InputUnit.scala:192:19]
reg states_20_vc_sel_0_20; // @[InputUnit.scala:192:19]
reg states_20_vc_sel_0_21; // @[InputUnit.scala:192:19]
reg [3:0] states_20_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [5:0] states_20_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_20_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [5:0] states_20_flow_egress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_20_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_21_g; // @[InputUnit.scala:192:19]
reg states_21_vc_sel_0_12; // @[InputUnit.scala:192:19]
reg states_21_vc_sel_0_13; // @[InputUnit.scala:192:19]
reg states_21_vc_sel_0_16; // @[InputUnit.scala:192:19]
reg states_21_vc_sel_0_17; // @[InputUnit.scala:192:19]
reg states_21_vc_sel_0_20; // @[InputUnit.scala:192:19]
reg states_21_vc_sel_0_21; // @[InputUnit.scala:192:19]
reg [3:0] states_21_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [5:0] states_21_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_21_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [5:0] states_21_flow_egress_node; // @[InputUnit.scala:192:19]
reg [2:0] states_21_flow_egress_node_id; // @[InputUnit.scala:192:19]
wire _GEN = io_in_flit_0_valid & io_in_flit_0_bits_head; // @[InputUnit.scala:205:30]
wire route_arbiter_io_in_12_valid = states_12_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_13_valid = states_13_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_16_valid = states_16_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_17_valid = states_17_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_20_valid = states_20_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_21_valid = states_21_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
reg [21:0] mask; // @[InputUnit.scala:250:21]
wire [21:0] _vcalloc_filter_T_3 = {vcalloc_vals_21, vcalloc_vals_20, 2'h0, vcalloc_vals_17, vcalloc_vals_16, 2'h0, vcalloc_vals_13, vcalloc_vals_12, 12'h0} & ~mask; // @[InputUnit.scala:250:21, :253:{80,87,89}, :266:32]
wire [43:0] vcalloc_filter = _vcalloc_filter_T_3[0] ? 44'h1 : _vcalloc_filter_T_3[1] ? 44'h2 : _vcalloc_filter_T_3[2] ? 44'h4 : _vcalloc_filter_T_3[3] ? 44'h8 : _vcalloc_filter_T_3[4] ? 44'h10 : _vcalloc_filter_T_3[5] ? 44'h20 : _vcalloc_filter_T_3[6] ? 44'h40 : _vcalloc_filter_T_3[7] ? 44'h80 : _vcalloc_filter_T_3[8] ? 44'h100 : _vcalloc_filter_T_3[9] ? 44'h200 : _vcalloc_filter_T_3[10] ? 44'h400 : _vcalloc_filter_T_3[11] ? 44'h800 : _vcalloc_filter_T_3[12] ? 44'h1000 : _vcalloc_filter_T_3[13] ? 44'h2000 : _vcalloc_filter_T_3[14] ? 44'h4000 : _vcalloc_filter_T_3[15] ? 44'h8000 : _vcalloc_filter_T_3[16] ? 44'h10000 : _vcalloc_filter_T_3[17] ? 44'h20000 : _vcalloc_filter_T_3[18] ? 44'h40000 : _vcalloc_filter_T_3[19] ? 44'h80000 : _vcalloc_filter_T_3[20] ? 44'h100000 : _vcalloc_filter_T_3[21] ? 44'h200000 : vcalloc_vals_12 ? 44'h400000000 : vcalloc_vals_13 ? 44'h800000000 : vcalloc_vals_16 ? 44'h4000000000 : vcalloc_vals_17 ? 44'h8000000000 : vcalloc_vals_20 ? 44'h40000000000 : {vcalloc_vals_21, 43'h0}; // @[OneHot.scala:85:71]
wire [21:0] vcalloc_sel = vcalloc_filter[21:0] | vcalloc_filter[43:22]; // @[Mux.scala:50:70]
wire io_vcalloc_req_valid_0 = vcalloc_vals_12 | vcalloc_vals_13 | vcalloc_vals_16 | vcalloc_vals_17 | vcalloc_vals_20 | vcalloc_vals_21; // @[package.scala:81:59]
assign vcalloc_vals_12 = states_12_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_13 = states_13_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_16 = states_16_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_17 = states_17_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_20 = states_20_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_21 = states_21_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
wire _GEN_0 = io_vcalloc_req_ready & io_vcalloc_req_valid_0; // @[Decoupled.scala:51:35]
wire _GEN_1 = _GEN_0 & vcalloc_sel[12]; // @[Mux.scala:32:36]
wire _GEN_2 = _GEN_0 & vcalloc_sel[13]; // @[Mux.scala:32:36]
wire _GEN_3 = _GEN_0 & vcalloc_sel[16]; // @[Mux.scala:32:36]
wire _GEN_4 = _GEN_0 & vcalloc_sel[17]; // @[Mux.scala:32:36]
wire _GEN_5 = _GEN_0 & vcalloc_sel[20]; // @[Mux.scala:32:36]
wire _GEN_6 = _GEN_0 & vcalloc_sel[21]; // @[Mux.scala:32:36] |
Generate the Verilog code corresponding to the following Chisel files.
File primitives.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object lowMask
{
def apply(in: UInt, topBound: BigInt, bottomBound: BigInt): UInt =
{
require(topBound != bottomBound)
val numInVals = BigInt(1)<<in.getWidth
if (topBound < bottomBound) {
lowMask(~in, numInVals - 1 - topBound, numInVals - 1 - bottomBound)
} else if (numInVals > 64 /* Empirical */) {
// For simulation performance, we should avoid generating
// extremely wide shifters, so we divide and conquer.
// Empirically, this does not impact synthesis QoR.
val mid = numInVals / 2
val msb = in(in.getWidth - 1)
val lsbs = in(in.getWidth - 2, 0)
if (mid < topBound) {
if (mid <= bottomBound) {
Mux(msb,
lowMask(lsbs, topBound - mid, bottomBound - mid),
0.U
)
} else {
Mux(msb,
lowMask(lsbs, topBound - mid, 0) ## ((BigInt(1)<<(mid - bottomBound).toInt) - 1).U,
lowMask(lsbs, mid, bottomBound)
)
}
} else {
~Mux(msb, 0.U, ~lowMask(lsbs, topBound, bottomBound))
}
} else {
val shift = (BigInt(-1)<<numInVals.toInt).S>>in
Reverse(
shift(
(numInVals - 1 - bottomBound).toInt,
(numInVals - topBound).toInt
)
)
}
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object countLeadingZeros
{
def apply(in: UInt): UInt = PriorityEncoder(in.asBools.reverse)
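// For illustration: countLeadingZeros("b0010".U(4.W)) === 2.U, since the
// priority encoder over the reversed bit order returns the index of the first
// set bit counted from the MSB side.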
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy2
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 1)>>1
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 2 + 1, ix * 2).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 2).orR
reducedVec.asUInt
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy4
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 3)>>2
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 4 + 3, ix * 4).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 4).orR
reducedVec.asUInt
}
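// For illustration: each output bit ORs one 4-bit group of the input (2-bit
// groups for orReduceBy2), so orReduceBy4("h0f0".U(12.W)) === "b010".U
// (only the middle nibble is non-zero).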
}
File MulAddRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN_interIo(expWidth: Int, sigWidth: Int) extends Bundle
{
//*** ENCODE SOME OF THESE CASES IN FEWER BITS?:
val isSigNaNAny = Bool()
val isNaNAOrB = Bool()
val isInfA = Bool()
val isZeroA = Bool()
val isInfB = Bool()
val isZeroB = Bool()
val signProd = Bool()
val isNaNC = Bool()
val isInfC = Bool()
val isZeroC = Bool()
val sExpSum = SInt((expWidth + 2).W)
val doSubMags = Bool()
val CIsDominant = Bool()
val CDom_CAlignDist = UInt(log2Ceil(sigWidth + 1).W)
val highAlignedSigC = UInt((sigWidth + 2).W)
val bit0AlignedSigC = UInt(1.W)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_preMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_preMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val mulAddA = Output(UInt(sigWidth.W))
val mulAddB = Output(UInt(sigWidth.W))
val mulAddC = Output(UInt((sigWidth * 2).W))
val toPostMul = Output(new MulAddRecFN_interIo(expWidth, sigWidth))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
//*** POSSIBLE TO REDUCE THIS BY 1 OR 2 BITS? (CURRENTLY 2 BITS BETWEEN
//*** UNSHIFTED C AND PRODUCT):
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawA = rawFloatFromRecFN(expWidth, sigWidth, io.a)
val rawB = rawFloatFromRecFN(expWidth, sigWidth, io.b)
val rawC = rawFloatFromRecFN(expWidth, sigWidth, io.c)
val signProd = rawA.sign ^ rawB.sign ^ io.op(1)
//*** REVIEW THE BIAS FOR 'sExpAlignedProd':
val sExpAlignedProd =
rawA.sExp +& rawB.sExp + (-(BigInt(1)<<expWidth) + sigWidth + 3).S
val doSubMags = signProd ^ rawC.sign ^ io.op(0)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
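    // Determine how far the addend C must be shifted right relative to the
    // product.  If A or B is zero, or the raw distance would be negative
    // (C's exponent at least matches the aligned product's), no shift is
    // applied; C "dominates" when it is nonzero and shifted by at most
    // sigWidth bits.  The shift amount is clamped to sigSumWidth - 1.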
val sNatCAlignDist = sExpAlignedProd - rawC.sExp
val posNatCAlignDist = sNatCAlignDist(expWidth + 1, 0)
val isMinCAlign = rawA.isZero || rawB.isZero || (sNatCAlignDist < 0.S)
val CIsDominant =
! rawC.isZero && (isMinCAlign || (posNatCAlignDist <= sigWidth.U))
val CAlignDist =
Mux(isMinCAlign,
0.U,
Mux(posNatCAlignDist < (sigSumWidth - 1).U,
posNatCAlignDist(log2Ceil(sigSumWidth) - 1, 0),
(sigSumWidth - 1).U
)
)
    val mainAlignedSigC =
        (Mux(doSubMags, ~rawC.sig, rawC.sig) ##
             Fill(sigSumWidth - sigWidth + 2, doSubMags)
        ).asSInt>>CAlignDist
val reduced4CExtra =
(orReduceBy4(rawC.sig<<((sigSumWidth - sigWidth - 1) & 3)) &
lowMask(
CAlignDist>>2,
//*** NOT NEEDED?:
// (sigSumWidth + 2)>>2,
(sigSumWidth - 1)>>2,
(sigSumWidth - sigWidth - 1)>>2
)
).orR
val alignedSigC =
Cat(mainAlignedSigC>>3,
Mux(doSubMags,
mainAlignedSigC(2, 0).andR && ! reduced4CExtra,
mainAlignedSigC(2, 0).orR || reduced4CExtra
)
)
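    // reduced4CExtra ORs together the C bits shifted out below the sum window
    // (evaluated in 4-bit groups against a low mask), and alignedSigC folds it,
    // together with the bottom 3 bits of the shifted C, into a single guard/
    // sticky LSB.  When subtracting, C was one's-complemented above, so the
    // condition is ANDed (all ones and nothing lost) instead of ORed.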
//------------------------------------------------------------------------
//------------------------------------------------------------------------
io.mulAddA := rawA.sig
io.mulAddB := rawB.sig
io.mulAddC := alignedSigC(sigWidth * 2, 1)
io.toPostMul.isSigNaNAny :=
isSigNaNRawFloat(rawA) || isSigNaNRawFloat(rawB) ||
isSigNaNRawFloat(rawC)
io.toPostMul.isNaNAOrB := rawA.isNaN || rawB.isNaN
io.toPostMul.isInfA := rawA.isInf
io.toPostMul.isZeroA := rawA.isZero
io.toPostMul.isInfB := rawB.isInf
io.toPostMul.isZeroB := rawB.isZero
io.toPostMul.signProd := signProd
io.toPostMul.isNaNC := rawC.isNaN
io.toPostMul.isInfC := rawC.isInf
io.toPostMul.isZeroC := rawC.isZero
io.toPostMul.sExpSum :=
Mux(CIsDominant, rawC.sExp, sExpAlignedProd - sigWidth.S)
io.toPostMul.doSubMags := doSubMags
io.toPostMul.CIsDominant := CIsDominant
io.toPostMul.CDom_CAlignDist := CAlignDist(log2Ceil(sigWidth + 1) - 1, 0)
io.toPostMul.highAlignedSigC :=
alignedSigC(sigSumWidth - 1, sigWidth * 2 + 1)
io.toPostMul.bit0AlignedSigC := alignedSigC(0)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_postMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_postMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val fromPreMul = Input(new MulAddRecFN_interIo(expWidth, sigWidth))
val mulAddResult = Input(UInt((sigWidth * 2 + 1).W))
val roundingMode = Input(UInt(3.W))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_min = (io.roundingMode === round_min)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val opSignC = io.fromPreMul.signProd ^ io.fromPreMul.doSubMags
val sigSum =
Cat(Mux(io.mulAddResult(sigWidth * 2),
io.fromPreMul.highAlignedSigC + 1.U,
io.fromPreMul.highAlignedSigC
),
io.mulAddResult(sigWidth * 2 - 1, 0),
io.fromPreMul.bit0AlignedSigC
)
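    // sigSum reassembles the full (sigWidth*3 + 3)-bit sum: the upper bits of the
    // aligned C (incremented when the external multiply-add carried out of bit
    // 2*sigWidth), then the low 2*sigWidth bits of the multiply-add result, with
    // the guard/sticky bit of the aligned C at the bottom.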
//------------------------------------------------------------------------
//------------------------------------------------------------------------
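    // C-dominant path: the result exponent follows C (sExpSum was set to C's
    // exponent in the pre-multiply stage), and at most a small left shift is
    // needed, bounded by the original alignment distance, so CDom_CAlignDist is
    // reused as the normalization shift; bits shifted past the kept window
    // collapse into a sticky bit.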
val CDom_sign = opSignC
val CDom_sExp = io.fromPreMul.sExpSum - io.fromPreMul.doSubMags.zext
val CDom_absSigSum =
Mux(io.fromPreMul.doSubMags,
~sigSum(sigSumWidth - 1, sigWidth + 1),
0.U(1.W) ##
//*** IF GAP IS REDUCED TO 1 BIT, MUST REDUCE THIS COMPONENT TO 1 BIT TOO:
io.fromPreMul.highAlignedSigC(sigWidth + 1, sigWidth) ##
sigSum(sigSumWidth - 3, sigWidth + 2)
)
val CDom_absSigSumExtra =
Mux(io.fromPreMul.doSubMags,
(~sigSum(sigWidth, 1)).orR,
sigSum(sigWidth + 1, 1).orR
)
val CDom_mainSig =
(CDom_absSigSum<<io.fromPreMul.CDom_CAlignDist)(
sigWidth * 2 + 1, sigWidth - 3)
val CDom_reduced4SigExtra =
(orReduceBy4(CDom_absSigSum(sigWidth - 1, 0)<<(~sigWidth & 3)) &
lowMask(io.fromPreMul.CDom_CAlignDist>>2, 0, sigWidth>>2)).orR
val CDom_sig =
Cat(CDom_mainSig>>3,
CDom_mainSig(2, 0).orR || CDom_reduced4SigExtra ||
CDom_absSigSumExtra
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
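    // Not-C-dominant path: heavy cancellation is possible, so take the magnitude
    // of the low 2*sigWidth + 3 bits of the sum (the "+ doSubMags" completes the
    // two's-complement negation started by inverting C earlier), then derive the
    // normalization shift from a leading-zero count over a 2-bit OR-reduced copy.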
val notCDom_signSigSum = sigSum(sigWidth * 2 + 3)
val notCDom_absSigSum =
Mux(notCDom_signSigSum,
~sigSum(sigWidth * 2 + 2, 0),
sigSum(sigWidth * 2 + 2, 0) + io.fromPreMul.doSubMags
)
val notCDom_reduced2AbsSigSum = orReduceBy2(notCDom_absSigSum)
val notCDom_normDistReduced2 = countLeadingZeros(notCDom_reduced2AbsSigSum)
val notCDom_nearNormDist = notCDom_normDistReduced2<<1
val notCDom_sExp = io.fromPreMul.sExpSum - notCDom_nearNormDist.asUInt.zext
val notCDom_mainSig =
(notCDom_absSigSum<<notCDom_nearNormDist)(
sigWidth * 2 + 3, sigWidth - 1)
val notCDom_reduced4SigExtra =
(orReduceBy2(
notCDom_reduced2AbsSigSum(sigWidth>>1, 0)<<((sigWidth>>1) & 1)) &
lowMask(notCDom_normDistReduced2>>1, 0, (sigWidth + 2)>>2)
).orR
val notCDom_sig =
Cat(notCDom_mainSig>>3,
notCDom_mainSig(2, 0).orR || notCDom_reduced4SigExtra
)
val notCDom_completeCancellation =
(notCDom_sig(sigWidth + 2, sigWidth + 1) === 0.U)
val notCDom_sign =
Mux(notCDom_completeCancellation,
roundingMode_min,
io.fromPreMul.signProd ^ notCDom_signSigSum
)
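    // Per IEEE 754, an exact zero produced by cancellation is negative only when
    // rounding toward minus infinity; hence the sign of a completely cancelled
    // sum comes from roundingMode_min rather than from the operands.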
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notNaN_isInfProd = io.fromPreMul.isInfA || io.fromPreMul.isInfB
val notNaN_isInfOut = notNaN_isInfProd || io.fromPreMul.isInfC
val notNaN_addZeros =
(io.fromPreMul.isZeroA || io.fromPreMul.isZeroB) &&
io.fromPreMul.isZeroC
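    // Invalid operation: a signaling NaN operand, infinity times zero in either
    // order, or infinity minus infinity (infinite product and infinite C with
    // effective subtraction) when no NaN is already propagating from A or B.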
io.invalidExc :=
io.fromPreMul.isSigNaNAny ||
(io.fromPreMul.isInfA && io.fromPreMul.isZeroB) ||
(io.fromPreMul.isZeroA && io.fromPreMul.isInfB) ||
(! io.fromPreMul.isNaNAOrB &&
(io.fromPreMul.isInfA || io.fromPreMul.isInfB) &&
io.fromPreMul.isInfC &&
io.fromPreMul.doSubMags)
io.rawOut.isNaN := io.fromPreMul.isNaNAOrB || io.fromPreMul.isNaNC
io.rawOut.isInf := notNaN_isInfOut
//*** IMPROVE?:
io.rawOut.isZero :=
notNaN_addZeros ||
(! io.fromPreMul.CIsDominant && notCDom_completeCancellation)
io.rawOut.sign :=
(notNaN_isInfProd && io.fromPreMul.signProd) ||
(io.fromPreMul.isInfC && opSignC) ||
(notNaN_addZeros && ! roundingMode_min &&
io.fromPreMul.signProd && opSignC) ||
(notNaN_addZeros && roundingMode_min &&
(io.fromPreMul.signProd || opSignC)) ||
(! notNaN_isInfOut && ! notNaN_addZeros &&
Mux(io.fromPreMul.CIsDominant, CDom_sign, notCDom_sign))
io.rawOut.sExp := Mux(io.fromPreMul.CIsDominant, CDom_sExp, notCDom_sExp)
io.rawOut.sig := Mux(io.fromPreMul.CIsDominant, CDom_sig, notCDom_sig)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulAddRecFNToRaw_preMul =
Module(new MulAddRecFNToRaw_preMul(expWidth, sigWidth))
val mulAddRecFNToRaw_postMul =
Module(new MulAddRecFNToRaw_postMul(expWidth, sigWidth))
mulAddRecFNToRaw_preMul.io.op := io.op
mulAddRecFNToRaw_preMul.io.a := io.a
mulAddRecFNToRaw_preMul.io.b := io.b
mulAddRecFNToRaw_preMul.io.c := io.c
val mulAddResult =
(mulAddRecFNToRaw_preMul.io.mulAddA *
mulAddRecFNToRaw_preMul.io.mulAddB) +&
mulAddRecFNToRaw_preMul.io.mulAddC
mulAddRecFNToRaw_postMul.io.fromPreMul :=
mulAddRecFNToRaw_preMul.io.toPostMul
mulAddRecFNToRaw_postMul.io.mulAddResult := mulAddResult
mulAddRecFNToRaw_postMul.io.roundingMode := io.roundingMode
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN =
Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
roundRawFNToRecFN.io.invalidExc := mulAddRecFNToRaw_postMul.io.invalidExc
roundRawFNToRecFN.io.infiniteExc := false.B
roundRawFNToRecFN.io.in := mulAddRecFNToRaw_postMul.io.rawOut
roundRawFNToRecFN.io.roundingMode := io.roundingMode
roundRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
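// A minimal usage sketch (illustrative only; ieeeA/ieeeB/ieeeC/ieeeOut are
// placeholder signals, not part of this file).  Operands enter in the recoded
// format produced by recFNFromFN; op(1) negates the product and op(0) negates c:
//
//   val fma = Module(new MulAddRecFN(8, 24))            // IEEE single precision
//   fma.io.op := 0.U                                    // plain a*b + c
//   fma.io.a := recFNFromFN(8, 24, ieeeA)
//   fma.io.b := recFNFromFN(8, 24, ieeeB)
//   fma.io.c := recFNFromFN(8, 24, ieeeC)
//   fma.io.roundingMode := round_near_even
//   fma.io.detectTininess := tininess_afterRounding
//   val ieeeOut = fNFromRecFN(8, 24, fma.io.out)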
| module MulAddRecFNToRaw_postMul_e8_s24_18( // @[MulAddRecFN.scala:169:7]
input io_fromPreMul_isSigNaNAny, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_isNaNAOrB, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_isInfA, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_isZeroA, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_isInfB, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_isZeroB, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_signProd, // @[MulAddRecFN.scala:172:16]
input [9:0] io_fromPreMul_sExpSum, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_doSubMags, // @[MulAddRecFN.scala:172:16]
input [4:0] io_fromPreMul_CDom_CAlignDist, // @[MulAddRecFN.scala:172:16]
input [25:0] io_fromPreMul_highAlignedSigC, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_bit0AlignedSigC, // @[MulAddRecFN.scala:172:16]
input [48:0] io_mulAddResult, // @[MulAddRecFN.scala:172:16]
output io_invalidExc, // @[MulAddRecFN.scala:172:16]
output io_rawOut_isNaN, // @[MulAddRecFN.scala:172:16]
output io_rawOut_isInf, // @[MulAddRecFN.scala:172:16]
output io_rawOut_isZero, // @[MulAddRecFN.scala:172:16]
output io_rawOut_sign, // @[MulAddRecFN.scala:172:16]
output [9:0] io_rawOut_sExp, // @[MulAddRecFN.scala:172:16]
output [26:0] io_rawOut_sig // @[MulAddRecFN.scala:172:16]
);
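  // This instance is a constant-propagated specialization of
  // MulAddRecFNToRaw_postMul: the isZeroC, isNaNC, isInfC and CIsDominant fields
  // and the rounding mode are compile-time constants here (see the constant wires
  // below), so the output muxes that would select the C-dominant results reduce
  // to the not-C-dominant path.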
wire io_fromPreMul_isSigNaNAny_0 = io_fromPreMul_isSigNaNAny; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isNaNAOrB_0 = io_fromPreMul_isNaNAOrB; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isInfA_0 = io_fromPreMul_isInfA; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isZeroA_0 = io_fromPreMul_isZeroA; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isInfB_0 = io_fromPreMul_isInfB; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isZeroB_0 = io_fromPreMul_isZeroB; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_signProd_0 = io_fromPreMul_signProd; // @[MulAddRecFN.scala:169:7]
wire [9:0] io_fromPreMul_sExpSum_0 = io_fromPreMul_sExpSum; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_doSubMags_0 = io_fromPreMul_doSubMags; // @[MulAddRecFN.scala:169:7]
wire [4:0] io_fromPreMul_CDom_CAlignDist_0 = io_fromPreMul_CDom_CAlignDist; // @[MulAddRecFN.scala:169:7]
wire [25:0] io_fromPreMul_highAlignedSigC_0 = io_fromPreMul_highAlignedSigC; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_bit0AlignedSigC_0 = io_fromPreMul_bit0AlignedSigC; // @[MulAddRecFN.scala:169:7]
wire [48:0] io_mulAddResult_0 = io_mulAddResult; // @[MulAddRecFN.scala:169:7]
wire [2:0] io_roundingMode = 3'h0; // @[MulAddRecFN.scala:169:7, :172:16]
wire io_fromPreMul_isZeroC = 1'h1; // @[MulAddRecFN.scala:169:7]
wire _io_rawOut_isZero_T = 1'h1; // @[MulAddRecFN.scala:283:14]
wire _io_rawOut_sign_T_3 = 1'h1; // @[MulAddRecFN.scala:287:29]
wire io_fromPreMul_isNaNC = 1'h0; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isInfC = 1'h0; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_CIsDominant = 1'h0; // @[MulAddRecFN.scala:169:7]
wire roundingMode_min = 1'h0; // @[MulAddRecFN.scala:186:45]
wire _io_invalidExc_T_7 = 1'h0; // @[MulAddRecFN.scala:275:61]
wire _io_invalidExc_T_8 = 1'h0; // @[MulAddRecFN.scala:276:35]
wire _io_rawOut_sign_T_1 = 1'h0; // @[MulAddRecFN.scala:286:31]
wire _io_rawOut_sign_T_8 = 1'h0; // @[MulAddRecFN.scala:289:26]
wire _io_rawOut_sign_T_10 = 1'h0; // @[MulAddRecFN.scala:289:46]
wire _io_rawOut_isNaN_T = io_fromPreMul_isNaNAOrB_0; // @[MulAddRecFN.scala:169:7, :278:48]
wire _io_invalidExc_T_9; // @[MulAddRecFN.scala:273:57]
wire notNaN_isInfOut; // @[MulAddRecFN.scala:265:44]
wire _io_rawOut_isZero_T_2; // @[MulAddRecFN.scala:282:25]
wire _io_rawOut_sign_T_17; // @[MulAddRecFN.scala:290:50]
wire [9:0] _io_rawOut_sExp_T; // @[MulAddRecFN.scala:293:26]
wire [26:0] _io_rawOut_sig_T; // @[MulAddRecFN.scala:294:25]
wire io_rawOut_isNaN_0; // @[MulAddRecFN.scala:169:7]
wire io_rawOut_isInf_0; // @[MulAddRecFN.scala:169:7]
wire io_rawOut_isZero_0; // @[MulAddRecFN.scala:169:7]
wire io_rawOut_sign_0; // @[MulAddRecFN.scala:169:7]
wire [9:0] io_rawOut_sExp_0; // @[MulAddRecFN.scala:169:7]
wire [26:0] io_rawOut_sig_0; // @[MulAddRecFN.scala:169:7]
wire io_invalidExc_0; // @[MulAddRecFN.scala:169:7]
wire opSignC = io_fromPreMul_signProd_0 ^ io_fromPreMul_doSubMags_0; // @[MulAddRecFN.scala:169:7, :190:42]
wire _sigSum_T = io_mulAddResult_0[48]; // @[MulAddRecFN.scala:169:7, :192:32]
wire [26:0] _sigSum_T_1 = {1'h0, io_fromPreMul_highAlignedSigC_0} + 27'h1; // @[MulAddRecFN.scala:169:7, :193:47]
wire [25:0] _sigSum_T_2 = _sigSum_T_1[25:0]; // @[MulAddRecFN.scala:193:47]
wire [25:0] _sigSum_T_3 = _sigSum_T ? _sigSum_T_2 : io_fromPreMul_highAlignedSigC_0; // @[MulAddRecFN.scala:169:7, :192:{16,32}, :193:47]
wire [47:0] _sigSum_T_4 = io_mulAddResult_0[47:0]; // @[MulAddRecFN.scala:169:7, :196:28]
wire [73:0] sigSum_hi = {_sigSum_T_3, _sigSum_T_4}; // @[MulAddRecFN.scala:192:{12,16}, :196:28]
wire [74:0] sigSum = {sigSum_hi, io_fromPreMul_bit0AlignedSigC_0}; // @[MulAddRecFN.scala:169:7, :192:12]
wire [1:0] _CDom_sExp_T = {1'h0, io_fromPreMul_doSubMags_0}; // @[MulAddRecFN.scala:169:7, :203:69]
wire [10:0] _GEN = {io_fromPreMul_sExpSum_0[9], io_fromPreMul_sExpSum_0}; // @[MulAddRecFN.scala:169:7, :203:43]
wire [10:0] _CDom_sExp_T_1 = _GEN - {{9{_CDom_sExp_T[1]}}, _CDom_sExp_T}; // @[MulAddRecFN.scala:203:{43,69}]
wire [9:0] _CDom_sExp_T_2 = _CDom_sExp_T_1[9:0]; // @[MulAddRecFN.scala:203:43]
wire [9:0] CDom_sExp = _CDom_sExp_T_2; // @[MulAddRecFN.scala:203:43]
wire [49:0] _CDom_absSigSum_T = sigSum[74:25]; // @[MulAddRecFN.scala:192:12, :206:20]
wire [49:0] _CDom_absSigSum_T_1 = ~_CDom_absSigSum_T; // @[MulAddRecFN.scala:206:{13,20}]
wire [1:0] _CDom_absSigSum_T_2 = io_fromPreMul_highAlignedSigC_0[25:24]; // @[MulAddRecFN.scala:169:7, :209:46]
wire [2:0] _CDom_absSigSum_T_3 = {1'h0, _CDom_absSigSum_T_2}; // @[MulAddRecFN.scala:207:22, :209:46]
wire [46:0] _CDom_absSigSum_T_4 = sigSum[72:26]; // @[MulAddRecFN.scala:192:12, :210:23]
wire [49:0] _CDom_absSigSum_T_5 = {_CDom_absSigSum_T_3, _CDom_absSigSum_T_4}; // @[MulAddRecFN.scala:207:22, :209:71, :210:23]
wire [49:0] CDom_absSigSum = io_fromPreMul_doSubMags_0 ? _CDom_absSigSum_T_1 : _CDom_absSigSum_T_5; // @[MulAddRecFN.scala:169:7, :205:12, :206:13, :209:71]
wire [23:0] _CDom_absSigSumExtra_T = sigSum[24:1]; // @[MulAddRecFN.scala:192:12, :215:21]
wire [23:0] _CDom_absSigSumExtra_T_1 = ~_CDom_absSigSumExtra_T; // @[MulAddRecFN.scala:215:{14,21}]
wire _CDom_absSigSumExtra_T_2 = |_CDom_absSigSumExtra_T_1; // @[MulAddRecFN.scala:215:{14,36}]
wire [24:0] _CDom_absSigSumExtra_T_3 = sigSum[25:1]; // @[MulAddRecFN.scala:192:12, :216:19]
wire _CDom_absSigSumExtra_T_4 = |_CDom_absSigSumExtra_T_3; // @[MulAddRecFN.scala:216:{19,37}]
wire CDom_absSigSumExtra = io_fromPreMul_doSubMags_0 ? _CDom_absSigSumExtra_T_2 : _CDom_absSigSumExtra_T_4; // @[MulAddRecFN.scala:169:7, :214:12, :215:36, :216:37]
wire [80:0] _CDom_mainSig_T = {31'h0, CDom_absSigSum} << io_fromPreMul_CDom_CAlignDist_0; // @[MulAddRecFN.scala:169:7, :205:12, :219:24]
wire [28:0] CDom_mainSig = _CDom_mainSig_T[49:21]; // @[MulAddRecFN.scala:219:{24,56}]
wire [23:0] _CDom_reduced4SigExtra_T = CDom_absSigSum[23:0]; // @[MulAddRecFN.scala:205:12, :222:36]
wire [26:0] _CDom_reduced4SigExtra_T_1 = {_CDom_reduced4SigExtra_T, 3'h0}; // @[MulAddRecFN.scala:169:7, :172:16, :222:{36,53}]
wire _CDom_reduced4SigExtra_reducedVec_0_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_1_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_2_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_3_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_4_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_5_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_6_T_1; // @[primitives.scala:123:57]
wire CDom_reduced4SigExtra_reducedVec_0; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_1; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_2; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_3; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_4; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_5; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_6; // @[primitives.scala:118:30]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_0_T = _CDom_reduced4SigExtra_T_1[3:0]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_0_T_1 = |_CDom_reduced4SigExtra_reducedVec_0_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_0 = _CDom_reduced4SigExtra_reducedVec_0_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_1_T = _CDom_reduced4SigExtra_T_1[7:4]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_1_T_1 = |_CDom_reduced4SigExtra_reducedVec_1_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_1 = _CDom_reduced4SigExtra_reducedVec_1_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_2_T = _CDom_reduced4SigExtra_T_1[11:8]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_2_T_1 = |_CDom_reduced4SigExtra_reducedVec_2_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_2 = _CDom_reduced4SigExtra_reducedVec_2_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_3_T = _CDom_reduced4SigExtra_T_1[15:12]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_3_T_1 = |_CDom_reduced4SigExtra_reducedVec_3_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_3 = _CDom_reduced4SigExtra_reducedVec_3_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_4_T = _CDom_reduced4SigExtra_T_1[19:16]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_4_T_1 = |_CDom_reduced4SigExtra_reducedVec_4_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_4 = _CDom_reduced4SigExtra_reducedVec_4_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_5_T = _CDom_reduced4SigExtra_T_1[23:20]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_5_T_1 = |_CDom_reduced4SigExtra_reducedVec_5_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_5 = _CDom_reduced4SigExtra_reducedVec_5_T_1; // @[primitives.scala:118:30, :120:54]
wire [2:0] _CDom_reduced4SigExtra_reducedVec_6_T = _CDom_reduced4SigExtra_T_1[26:24]; // @[primitives.scala:123:15]
assign _CDom_reduced4SigExtra_reducedVec_6_T_1 = |_CDom_reduced4SigExtra_reducedVec_6_T; // @[primitives.scala:123:{15,57}]
assign CDom_reduced4SigExtra_reducedVec_6 = _CDom_reduced4SigExtra_reducedVec_6_T_1; // @[primitives.scala:118:30, :123:57]
wire [1:0] CDom_reduced4SigExtra_lo_hi = {CDom_reduced4SigExtra_reducedVec_2, CDom_reduced4SigExtra_reducedVec_1}; // @[primitives.scala:118:30, :124:20]
wire [2:0] CDom_reduced4SigExtra_lo = {CDom_reduced4SigExtra_lo_hi, CDom_reduced4SigExtra_reducedVec_0}; // @[primitives.scala:118:30, :124:20]
wire [1:0] CDom_reduced4SigExtra_hi_lo = {CDom_reduced4SigExtra_reducedVec_4, CDom_reduced4SigExtra_reducedVec_3}; // @[primitives.scala:118:30, :124:20]
wire [1:0] CDom_reduced4SigExtra_hi_hi = {CDom_reduced4SigExtra_reducedVec_6, CDom_reduced4SigExtra_reducedVec_5}; // @[primitives.scala:118:30, :124:20]
wire [3:0] CDom_reduced4SigExtra_hi = {CDom_reduced4SigExtra_hi_hi, CDom_reduced4SigExtra_hi_lo}; // @[primitives.scala:124:20]
wire [6:0] _CDom_reduced4SigExtra_T_2 = {CDom_reduced4SigExtra_hi, CDom_reduced4SigExtra_lo}; // @[primitives.scala:124:20]
wire [2:0] _CDom_reduced4SigExtra_T_3 = io_fromPreMul_CDom_CAlignDist_0[4:2]; // @[MulAddRecFN.scala:169:7, :223:51]
wire [2:0] _CDom_reduced4SigExtra_T_4 = ~_CDom_reduced4SigExtra_T_3; // @[primitives.scala:52:21]
wire [8:0] CDom_reduced4SigExtra_shift = $signed(9'sh100 >>> _CDom_reduced4SigExtra_T_4); // @[primitives.scala:52:21, :76:56]
wire [5:0] _CDom_reduced4SigExtra_T_5 = CDom_reduced4SigExtra_shift[6:1]; // @[primitives.scala:76:56, :78:22]
wire [3:0] _CDom_reduced4SigExtra_T_6 = _CDom_reduced4SigExtra_T_5[3:0]; // @[primitives.scala:77:20, :78:22]
wire [1:0] _CDom_reduced4SigExtra_T_7 = _CDom_reduced4SigExtra_T_6[1:0]; // @[primitives.scala:77:20]
wire _CDom_reduced4SigExtra_T_8 = _CDom_reduced4SigExtra_T_7[0]; // @[primitives.scala:77:20]
wire _CDom_reduced4SigExtra_T_9 = _CDom_reduced4SigExtra_T_7[1]; // @[primitives.scala:77:20]
wire [1:0] _CDom_reduced4SigExtra_T_10 = {_CDom_reduced4SigExtra_T_8, _CDom_reduced4SigExtra_T_9}; // @[primitives.scala:77:20]
wire [1:0] _CDom_reduced4SigExtra_T_11 = _CDom_reduced4SigExtra_T_6[3:2]; // @[primitives.scala:77:20]
wire _CDom_reduced4SigExtra_T_12 = _CDom_reduced4SigExtra_T_11[0]; // @[primitives.scala:77:20]
wire _CDom_reduced4SigExtra_T_13 = _CDom_reduced4SigExtra_T_11[1]; // @[primitives.scala:77:20]
wire [1:0] _CDom_reduced4SigExtra_T_14 = {_CDom_reduced4SigExtra_T_12, _CDom_reduced4SigExtra_T_13}; // @[primitives.scala:77:20]
wire [3:0] _CDom_reduced4SigExtra_T_15 = {_CDom_reduced4SigExtra_T_10, _CDom_reduced4SigExtra_T_14}; // @[primitives.scala:77:20]
wire [1:0] _CDom_reduced4SigExtra_T_16 = _CDom_reduced4SigExtra_T_5[5:4]; // @[primitives.scala:77:20, :78:22]
wire _CDom_reduced4SigExtra_T_17 = _CDom_reduced4SigExtra_T_16[0]; // @[primitives.scala:77:20]
wire _CDom_reduced4SigExtra_T_18 = _CDom_reduced4SigExtra_T_16[1]; // @[primitives.scala:77:20]
wire [1:0] _CDom_reduced4SigExtra_T_19 = {_CDom_reduced4SigExtra_T_17, _CDom_reduced4SigExtra_T_18}; // @[primitives.scala:77:20]
wire [5:0] _CDom_reduced4SigExtra_T_20 = {_CDom_reduced4SigExtra_T_15, _CDom_reduced4SigExtra_T_19}; // @[primitives.scala:77:20]
wire [6:0] _CDom_reduced4SigExtra_T_21 = {1'h0, _CDom_reduced4SigExtra_T_2[5:0] & _CDom_reduced4SigExtra_T_20}; // @[primitives.scala:77:20, :124:20]
wire CDom_reduced4SigExtra = |_CDom_reduced4SigExtra_T_21; // @[MulAddRecFN.scala:222:72, :223:73]
wire [25:0] _CDom_sig_T = CDom_mainSig[28:3]; // @[MulAddRecFN.scala:219:56, :225:25]
wire [2:0] _CDom_sig_T_1 = CDom_mainSig[2:0]; // @[MulAddRecFN.scala:219:56, :226:25]
wire _CDom_sig_T_2 = |_CDom_sig_T_1; // @[MulAddRecFN.scala:226:{25,32}]
wire _CDom_sig_T_3 = _CDom_sig_T_2 | CDom_reduced4SigExtra; // @[MulAddRecFN.scala:223:73, :226:{32,36}]
wire _CDom_sig_T_4 = _CDom_sig_T_3 | CDom_absSigSumExtra; // @[MulAddRecFN.scala:214:12, :226:{36,61}]
wire [26:0] CDom_sig = {_CDom_sig_T, _CDom_sig_T_4}; // @[MulAddRecFN.scala:225:{12,25}, :226:61]
wire notCDom_signSigSum = sigSum[51]; // @[MulAddRecFN.scala:192:12, :232:36]
wire [50:0] _notCDom_absSigSum_T = sigSum[50:0]; // @[MulAddRecFN.scala:192:12, :235:20]
wire [50:0] _notCDom_absSigSum_T_2 = sigSum[50:0]; // @[MulAddRecFN.scala:192:12, :235:20, :236:19]
wire [50:0] _notCDom_absSigSum_T_1 = ~_notCDom_absSigSum_T; // @[MulAddRecFN.scala:235:{13,20}]
wire [51:0] _notCDom_absSigSum_T_3 = {1'h0, _notCDom_absSigSum_T_2} + {51'h0, io_fromPreMul_doSubMags_0}; // @[MulAddRecFN.scala:169:7, :236:{19,41}]
wire [50:0] _notCDom_absSigSum_T_4 = _notCDom_absSigSum_T_3[50:0]; // @[MulAddRecFN.scala:236:41]
wire [50:0] notCDom_absSigSum = notCDom_signSigSum ? _notCDom_absSigSum_T_1 : _notCDom_absSigSum_T_4; // @[MulAddRecFN.scala:232:36, :234:12, :235:13, :236:41]
wire _notCDom_reduced2AbsSigSum_reducedVec_0_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_1_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_2_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_3_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_4_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_5_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_6_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_7_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_8_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_9_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_10_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_11_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_12_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_13_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_14_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_15_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_16_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_17_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_18_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_19_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_20_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_21_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_22_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_23_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_24_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_25_T_1; // @[primitives.scala:106:57]
wire notCDom_reduced2AbsSigSum_reducedVec_0; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_1; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_2; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_3; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_4; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_5; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_6; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_7; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_8; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_9; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_10; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_11; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_12; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_13; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_14; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_15; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_16; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_17; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_18; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_19; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_20; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_21; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_22; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_23; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_24; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_25; // @[primitives.scala:101:30]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_0_T = notCDom_absSigSum[1:0]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_0_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_0_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_0 = _notCDom_reduced2AbsSigSum_reducedVec_0_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_1_T = notCDom_absSigSum[3:2]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_1_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_1_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_1 = _notCDom_reduced2AbsSigSum_reducedVec_1_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_2_T = notCDom_absSigSum[5:4]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_2_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_2_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_2 = _notCDom_reduced2AbsSigSum_reducedVec_2_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_3_T = notCDom_absSigSum[7:6]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_3_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_3_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_3 = _notCDom_reduced2AbsSigSum_reducedVec_3_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_4_T = notCDom_absSigSum[9:8]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_4_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_4_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_4 = _notCDom_reduced2AbsSigSum_reducedVec_4_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_5_T = notCDom_absSigSum[11:10]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_5_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_5_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_5 = _notCDom_reduced2AbsSigSum_reducedVec_5_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_6_T = notCDom_absSigSum[13:12]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_6_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_6_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_6 = _notCDom_reduced2AbsSigSum_reducedVec_6_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_7_T = notCDom_absSigSum[15:14]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_7_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_7_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_7 = _notCDom_reduced2AbsSigSum_reducedVec_7_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_8_T = notCDom_absSigSum[17:16]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_8_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_8_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_8 = _notCDom_reduced2AbsSigSum_reducedVec_8_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_9_T = notCDom_absSigSum[19:18]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_9_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_9_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_9 = _notCDom_reduced2AbsSigSum_reducedVec_9_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_10_T = notCDom_absSigSum[21:20]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_10_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_10_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_10 = _notCDom_reduced2AbsSigSum_reducedVec_10_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_11_T = notCDom_absSigSum[23:22]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_11_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_11_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_11 = _notCDom_reduced2AbsSigSum_reducedVec_11_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_12_T = notCDom_absSigSum[25:24]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_12_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_12_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_12 = _notCDom_reduced2AbsSigSum_reducedVec_12_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_13_T = notCDom_absSigSum[27:26]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_13_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_13_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_13 = _notCDom_reduced2AbsSigSum_reducedVec_13_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_14_T = notCDom_absSigSum[29:28]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_14_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_14_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_14 = _notCDom_reduced2AbsSigSum_reducedVec_14_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_15_T = notCDom_absSigSum[31:30]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_15_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_15_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_15 = _notCDom_reduced2AbsSigSum_reducedVec_15_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_16_T = notCDom_absSigSum[33:32]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_16_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_16_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_16 = _notCDom_reduced2AbsSigSum_reducedVec_16_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_17_T = notCDom_absSigSum[35:34]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_17_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_17_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_17 = _notCDom_reduced2AbsSigSum_reducedVec_17_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_18_T = notCDom_absSigSum[37:36]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_18_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_18_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_18 = _notCDom_reduced2AbsSigSum_reducedVec_18_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_19_T = notCDom_absSigSum[39:38]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_19_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_19_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_19 = _notCDom_reduced2AbsSigSum_reducedVec_19_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_20_T = notCDom_absSigSum[41:40]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_20_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_20_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_20 = _notCDom_reduced2AbsSigSum_reducedVec_20_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_21_T = notCDom_absSigSum[43:42]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_21_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_21_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_21 = _notCDom_reduced2AbsSigSum_reducedVec_21_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_22_T = notCDom_absSigSum[45:44]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_22_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_22_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_22 = _notCDom_reduced2AbsSigSum_reducedVec_22_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_23_T = notCDom_absSigSum[47:46]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_23_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_23_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_23 = _notCDom_reduced2AbsSigSum_reducedVec_23_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_24_T = notCDom_absSigSum[49:48]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_24_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_24_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_24 = _notCDom_reduced2AbsSigSum_reducedVec_24_T_1; // @[primitives.scala:101:30, :103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_25_T = notCDom_absSigSum[50]; // @[primitives.scala:106:15]
assign _notCDom_reduced2AbsSigSum_reducedVec_25_T_1 = _notCDom_reduced2AbsSigSum_reducedVec_25_T; // @[primitives.scala:106:{15,57}]
assign notCDom_reduced2AbsSigSum_reducedVec_25 = _notCDom_reduced2AbsSigSum_reducedVec_25_T_1; // @[primitives.scala:101:30, :106:57]
wire [1:0] notCDom_reduced2AbsSigSum_lo_lo_lo_hi = {notCDom_reduced2AbsSigSum_reducedVec_2, notCDom_reduced2AbsSigSum_reducedVec_1}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_lo_lo_lo = {notCDom_reduced2AbsSigSum_lo_lo_lo_hi, notCDom_reduced2AbsSigSum_reducedVec_0}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_lo_lo_hi_hi = {notCDom_reduced2AbsSigSum_reducedVec_5, notCDom_reduced2AbsSigSum_reducedVec_4}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_lo_lo_hi = {notCDom_reduced2AbsSigSum_lo_lo_hi_hi, notCDom_reduced2AbsSigSum_reducedVec_3}; // @[primitives.scala:101:30, :107:20]
wire [5:0] notCDom_reduced2AbsSigSum_lo_lo = {notCDom_reduced2AbsSigSum_lo_lo_hi, notCDom_reduced2AbsSigSum_lo_lo_lo}; // @[primitives.scala:107:20]
wire [1:0] notCDom_reduced2AbsSigSum_lo_hi_lo_hi = {notCDom_reduced2AbsSigSum_reducedVec_8, notCDom_reduced2AbsSigSum_reducedVec_7}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_lo_hi_lo = {notCDom_reduced2AbsSigSum_lo_hi_lo_hi, notCDom_reduced2AbsSigSum_reducedVec_6}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_lo_hi_hi_lo = {notCDom_reduced2AbsSigSum_reducedVec_10, notCDom_reduced2AbsSigSum_reducedVec_9}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_lo_hi_hi_hi = {notCDom_reduced2AbsSigSum_reducedVec_12, notCDom_reduced2AbsSigSum_reducedVec_11}; // @[primitives.scala:101:30, :107:20]
wire [3:0] notCDom_reduced2AbsSigSum_lo_hi_hi = {notCDom_reduced2AbsSigSum_lo_hi_hi_hi, notCDom_reduced2AbsSigSum_lo_hi_hi_lo}; // @[primitives.scala:107:20]
wire [6:0] notCDom_reduced2AbsSigSum_lo_hi = {notCDom_reduced2AbsSigSum_lo_hi_hi, notCDom_reduced2AbsSigSum_lo_hi_lo}; // @[primitives.scala:107:20]
wire [12:0] notCDom_reduced2AbsSigSum_lo = {notCDom_reduced2AbsSigSum_lo_hi, notCDom_reduced2AbsSigSum_lo_lo}; // @[primitives.scala:107:20]
wire [1:0] notCDom_reduced2AbsSigSum_hi_lo_lo_hi = {notCDom_reduced2AbsSigSum_reducedVec_15, notCDom_reduced2AbsSigSum_reducedVec_14}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_hi_lo_lo = {notCDom_reduced2AbsSigSum_hi_lo_lo_hi, notCDom_reduced2AbsSigSum_reducedVec_13}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_hi_lo_hi_hi = {notCDom_reduced2AbsSigSum_reducedVec_18, notCDom_reduced2AbsSigSum_reducedVec_17}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_hi_lo_hi = {notCDom_reduced2AbsSigSum_hi_lo_hi_hi, notCDom_reduced2AbsSigSum_reducedVec_16}; // @[primitives.scala:101:30, :107:20]
wire [5:0] notCDom_reduced2AbsSigSum_hi_lo = {notCDom_reduced2AbsSigSum_hi_lo_hi, notCDom_reduced2AbsSigSum_hi_lo_lo}; // @[primitives.scala:107:20]
wire [1:0] notCDom_reduced2AbsSigSum_hi_hi_lo_hi = {notCDom_reduced2AbsSigSum_reducedVec_21, notCDom_reduced2AbsSigSum_reducedVec_20}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_hi_hi_lo = {notCDom_reduced2AbsSigSum_hi_hi_lo_hi, notCDom_reduced2AbsSigSum_reducedVec_19}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_hi_hi_hi_lo = {notCDom_reduced2AbsSigSum_reducedVec_23, notCDom_reduced2AbsSigSum_reducedVec_22}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_hi_hi_hi_hi = {notCDom_reduced2AbsSigSum_reducedVec_25, notCDom_reduced2AbsSigSum_reducedVec_24}; // @[primitives.scala:101:30, :107:20]
wire [3:0] notCDom_reduced2AbsSigSum_hi_hi_hi = {notCDom_reduced2AbsSigSum_hi_hi_hi_hi, notCDom_reduced2AbsSigSum_hi_hi_hi_lo}; // @[primitives.scala:107:20]
wire [6:0] notCDom_reduced2AbsSigSum_hi_hi = {notCDom_reduced2AbsSigSum_hi_hi_hi, notCDom_reduced2AbsSigSum_hi_hi_lo}; // @[primitives.scala:107:20]
wire [12:0] notCDom_reduced2AbsSigSum_hi = {notCDom_reduced2AbsSigSum_hi_hi, notCDom_reduced2AbsSigSum_hi_lo}; // @[primitives.scala:107:20]
wire [25:0] notCDom_reduced2AbsSigSum = {notCDom_reduced2AbsSigSum_hi, notCDom_reduced2AbsSigSum_lo}; // @[primitives.scala:107:20]
wire _notCDom_normDistReduced2_T = notCDom_reduced2AbsSigSum[0]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_1 = notCDom_reduced2AbsSigSum[1]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_2 = notCDom_reduced2AbsSigSum[2]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_3 = notCDom_reduced2AbsSigSum[3]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_4 = notCDom_reduced2AbsSigSum[4]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_5 = notCDom_reduced2AbsSigSum[5]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_6 = notCDom_reduced2AbsSigSum[6]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_7 = notCDom_reduced2AbsSigSum[7]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_8 = notCDom_reduced2AbsSigSum[8]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_9 = notCDom_reduced2AbsSigSum[9]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_10 = notCDom_reduced2AbsSigSum[10]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_11 = notCDom_reduced2AbsSigSum[11]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_12 = notCDom_reduced2AbsSigSum[12]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_13 = notCDom_reduced2AbsSigSum[13]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_14 = notCDom_reduced2AbsSigSum[14]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_15 = notCDom_reduced2AbsSigSum[15]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_16 = notCDom_reduced2AbsSigSum[16]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_17 = notCDom_reduced2AbsSigSum[17]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_18 = notCDom_reduced2AbsSigSum[18]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_19 = notCDom_reduced2AbsSigSum[19]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_20 = notCDom_reduced2AbsSigSum[20]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_21 = notCDom_reduced2AbsSigSum[21]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_22 = notCDom_reduced2AbsSigSum[22]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_23 = notCDom_reduced2AbsSigSum[23]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_24 = notCDom_reduced2AbsSigSum[24]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_25 = notCDom_reduced2AbsSigSum[25]; // @[primitives.scala:91:52, :107:20]
wire [4:0] _notCDom_normDistReduced2_T_26 = {4'hC, ~_notCDom_normDistReduced2_T_1}; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_27 = _notCDom_normDistReduced2_T_2 ? 5'h17 : _notCDom_normDistReduced2_T_26; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_28 = _notCDom_normDistReduced2_T_3 ? 5'h16 : _notCDom_normDistReduced2_T_27; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_29 = _notCDom_normDistReduced2_T_4 ? 5'h15 : _notCDom_normDistReduced2_T_28; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_30 = _notCDom_normDistReduced2_T_5 ? 5'h14 : _notCDom_normDistReduced2_T_29; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_31 = _notCDom_normDistReduced2_T_6 ? 5'h13 : _notCDom_normDistReduced2_T_30; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_32 = _notCDom_normDistReduced2_T_7 ? 5'h12 : _notCDom_normDistReduced2_T_31; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_33 = _notCDom_normDistReduced2_T_8 ? 5'h11 : _notCDom_normDistReduced2_T_32; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_34 = _notCDom_normDistReduced2_T_9 ? 5'h10 : _notCDom_normDistReduced2_T_33; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_35 = _notCDom_normDistReduced2_T_10 ? 5'hF : _notCDom_normDistReduced2_T_34; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_36 = _notCDom_normDistReduced2_T_11 ? 5'hE : _notCDom_normDistReduced2_T_35; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_37 = _notCDom_normDistReduced2_T_12 ? 5'hD : _notCDom_normDistReduced2_T_36; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_38 = _notCDom_normDistReduced2_T_13 ? 5'hC : _notCDom_normDistReduced2_T_37; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_39 = _notCDom_normDistReduced2_T_14 ? 5'hB : _notCDom_normDistReduced2_T_38; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_40 = _notCDom_normDistReduced2_T_15 ? 5'hA : _notCDom_normDistReduced2_T_39; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_41 = _notCDom_normDistReduced2_T_16 ? 5'h9 : _notCDom_normDistReduced2_T_40; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_42 = _notCDom_normDistReduced2_T_17 ? 5'h8 : _notCDom_normDistReduced2_T_41; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_43 = _notCDom_normDistReduced2_T_18 ? 5'h7 : _notCDom_normDistReduced2_T_42; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_44 = _notCDom_normDistReduced2_T_19 ? 5'h6 : _notCDom_normDistReduced2_T_43; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_45 = _notCDom_normDistReduced2_T_20 ? 5'h5 : _notCDom_normDistReduced2_T_44; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_46 = _notCDom_normDistReduced2_T_21 ? 5'h4 : _notCDom_normDistReduced2_T_45; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_47 = _notCDom_normDistReduced2_T_22 ? 5'h3 : _notCDom_normDistReduced2_T_46; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_48 = _notCDom_normDistReduced2_T_23 ? 5'h2 : _notCDom_normDistReduced2_T_47; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_49 = _notCDom_normDistReduced2_T_24 ? 5'h1 : _notCDom_normDistReduced2_T_48; // @[Mux.scala:50:70]
wire [4:0] notCDom_normDistReduced2 = _notCDom_normDistReduced2_T_25 ? 5'h0 : _notCDom_normDistReduced2_T_49; // @[Mux.scala:50:70]
wire [5:0] notCDom_nearNormDist = {notCDom_normDistReduced2, 1'h0}; // @[Mux.scala:50:70]
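  // The mux chain above is the priority encoder produced by countLeadingZeros on
  // the 2-bit-reduced sum; notCDom_nearNormDist doubles that count to get the
  // normalization shift in bit positions.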
wire [6:0] _notCDom_sExp_T = {1'h0, notCDom_nearNormDist}; // @[MulAddRecFN.scala:240:56, :241:76]
wire [10:0] _notCDom_sExp_T_1 = _GEN - {{4{_notCDom_sExp_T[6]}}, _notCDom_sExp_T}; // @[MulAddRecFN.scala:203:43, :241:{46,76}]
wire [9:0] _notCDom_sExp_T_2 = _notCDom_sExp_T_1[9:0]; // @[MulAddRecFN.scala:241:46]
wire [9:0] notCDom_sExp = _notCDom_sExp_T_2; // @[MulAddRecFN.scala:241:46]
assign _io_rawOut_sExp_T = notCDom_sExp; // @[MulAddRecFN.scala:241:46, :293:26]
wire [113:0] _notCDom_mainSig_T = {63'h0, notCDom_absSigSum} << notCDom_nearNormDist; // @[MulAddRecFN.scala:234:12, :240:56, :243:27]
wire [28:0] notCDom_mainSig = _notCDom_mainSig_T[51:23]; // @[MulAddRecFN.scala:243:{27,50}]
wire [12:0] _notCDom_reduced4SigExtra_T = notCDom_reduced2AbsSigSum[12:0]; // @[primitives.scala:107:20]
wire [12:0] _notCDom_reduced4SigExtra_T_1 = _notCDom_reduced4SigExtra_T; // @[MulAddRecFN.scala:247:{39,55}]
wire _notCDom_reduced4SigExtra_reducedVec_0_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_1_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_2_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_3_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_4_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_5_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_6_T_1; // @[primitives.scala:106:57]
wire notCDom_reduced4SigExtra_reducedVec_0; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_1; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_2; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_3; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_4; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_5; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_6; // @[primitives.scala:101:30]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_0_T = _notCDom_reduced4SigExtra_T_1[1:0]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_0_T_1 = |_notCDom_reduced4SigExtra_reducedVec_0_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_0 = _notCDom_reduced4SigExtra_reducedVec_0_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_1_T = _notCDom_reduced4SigExtra_T_1[3:2]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_1_T_1 = |_notCDom_reduced4SigExtra_reducedVec_1_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_1 = _notCDom_reduced4SigExtra_reducedVec_1_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_2_T = _notCDom_reduced4SigExtra_T_1[5:4]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_2_T_1 = |_notCDom_reduced4SigExtra_reducedVec_2_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_2 = _notCDom_reduced4SigExtra_reducedVec_2_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_3_T = _notCDom_reduced4SigExtra_T_1[7:6]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_3_T_1 = |_notCDom_reduced4SigExtra_reducedVec_3_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_3 = _notCDom_reduced4SigExtra_reducedVec_3_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_4_T = _notCDom_reduced4SigExtra_T_1[9:8]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_4_T_1 = |_notCDom_reduced4SigExtra_reducedVec_4_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_4 = _notCDom_reduced4SigExtra_reducedVec_4_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_5_T = _notCDom_reduced4SigExtra_T_1[11:10]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_5_T_1 = |_notCDom_reduced4SigExtra_reducedVec_5_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_5 = _notCDom_reduced4SigExtra_reducedVec_5_T_1; // @[primitives.scala:101:30, :103:54]
wire _notCDom_reduced4SigExtra_reducedVec_6_T = _notCDom_reduced4SigExtra_T_1[12]; // @[primitives.scala:106:15]
assign _notCDom_reduced4SigExtra_reducedVec_6_T_1 = _notCDom_reduced4SigExtra_reducedVec_6_T; // @[primitives.scala:106:{15,57}]
assign notCDom_reduced4SigExtra_reducedVec_6 = _notCDom_reduced4SigExtra_reducedVec_6_T_1; // @[primitives.scala:101:30, :106:57]
wire [1:0] notCDom_reduced4SigExtra_lo_hi = {notCDom_reduced4SigExtra_reducedVec_2, notCDom_reduced4SigExtra_reducedVec_1}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced4SigExtra_lo = {notCDom_reduced4SigExtra_lo_hi, notCDom_reduced4SigExtra_reducedVec_0}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced4SigExtra_hi_lo = {notCDom_reduced4SigExtra_reducedVec_4, notCDom_reduced4SigExtra_reducedVec_3}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced4SigExtra_hi_hi = {notCDom_reduced4SigExtra_reducedVec_6, notCDom_reduced4SigExtra_reducedVec_5}; // @[primitives.scala:101:30, :107:20]
wire [3:0] notCDom_reduced4SigExtra_hi = {notCDom_reduced4SigExtra_hi_hi, notCDom_reduced4SigExtra_hi_lo}; // @[primitives.scala:107:20]
wire [6:0] _notCDom_reduced4SigExtra_T_2 = {notCDom_reduced4SigExtra_hi, notCDom_reduced4SigExtra_lo}; // @[primitives.scala:107:20]
wire [3:0] _notCDom_reduced4SigExtra_T_3 = notCDom_normDistReduced2[4:1]; // @[Mux.scala:50:70]
wire [3:0] _notCDom_reduced4SigExtra_T_4 = ~_notCDom_reduced4SigExtra_T_3; // @[primitives.scala:52:21]
wire [16:0] notCDom_reduced4SigExtra_shift = $signed(17'sh10000 >>> _notCDom_reduced4SigExtra_T_4); // @[primitives.scala:52:21, :76:56]
wire [5:0] _notCDom_reduced4SigExtra_T_5 = notCDom_reduced4SigExtra_shift[6:1]; // @[primitives.scala:76:56, :78:22]
wire [3:0] _notCDom_reduced4SigExtra_T_6 = _notCDom_reduced4SigExtra_T_5[3:0]; // @[primitives.scala:77:20, :78:22]
wire [1:0] _notCDom_reduced4SigExtra_T_7 = _notCDom_reduced4SigExtra_T_6[1:0]; // @[primitives.scala:77:20]
wire _notCDom_reduced4SigExtra_T_8 = _notCDom_reduced4SigExtra_T_7[0]; // @[primitives.scala:77:20]
wire _notCDom_reduced4SigExtra_T_9 = _notCDom_reduced4SigExtra_T_7[1]; // @[primitives.scala:77:20]
wire [1:0] _notCDom_reduced4SigExtra_T_10 = {_notCDom_reduced4SigExtra_T_8, _notCDom_reduced4SigExtra_T_9}; // @[primitives.scala:77:20]
wire [1:0] _notCDom_reduced4SigExtra_T_11 = _notCDom_reduced4SigExtra_T_6[3:2]; // @[primitives.scala:77:20]
wire _notCDom_reduced4SigExtra_T_12 = _notCDom_reduced4SigExtra_T_11[0]; // @[primitives.scala:77:20]
wire _notCDom_reduced4SigExtra_T_13 = _notCDom_reduced4SigExtra_T_11[1]; // @[primitives.scala:77:20]
wire [1:0] _notCDom_reduced4SigExtra_T_14 = {_notCDom_reduced4SigExtra_T_12, _notCDom_reduced4SigExtra_T_13}; // @[primitives.scala:77:20]
wire [3:0] _notCDom_reduced4SigExtra_T_15 = {_notCDom_reduced4SigExtra_T_10, _notCDom_reduced4SigExtra_T_14}; // @[primitives.scala:77:20]
wire [1:0] _notCDom_reduced4SigExtra_T_16 = _notCDom_reduced4SigExtra_T_5[5:4]; // @[primitives.scala:77:20, :78:22]
wire _notCDom_reduced4SigExtra_T_17 = _notCDom_reduced4SigExtra_T_16[0]; // @[primitives.scala:77:20]
wire _notCDom_reduced4SigExtra_T_18 = _notCDom_reduced4SigExtra_T_16[1]; // @[primitives.scala:77:20]
wire [1:0] _notCDom_reduced4SigExtra_T_19 = {_notCDom_reduced4SigExtra_T_17, _notCDom_reduced4SigExtra_T_18}; // @[primitives.scala:77:20]
wire [5:0] _notCDom_reduced4SigExtra_T_20 = {_notCDom_reduced4SigExtra_T_15, _notCDom_reduced4SigExtra_T_19}; // @[primitives.scala:77:20]
wire [6:0] _notCDom_reduced4SigExtra_T_21 = {1'h0, _notCDom_reduced4SigExtra_T_2[5:0] & _notCDom_reduced4SigExtra_T_20}; // @[primitives.scala:77:20, :107:20]
wire notCDom_reduced4SigExtra = |_notCDom_reduced4SigExtra_T_21; // @[MulAddRecFN.scala:247:78, :249:11]
wire [25:0] _notCDom_sig_T = notCDom_mainSig[28:3]; // @[MulAddRecFN.scala:243:50, :251:28]
wire [2:0] _notCDom_sig_T_1 = notCDom_mainSig[2:0]; // @[MulAddRecFN.scala:243:50, :252:28]
wire _notCDom_sig_T_2 = |_notCDom_sig_T_1; // @[MulAddRecFN.scala:252:{28,35}]
wire _notCDom_sig_T_3 = _notCDom_sig_T_2 | notCDom_reduced4SigExtra; // @[MulAddRecFN.scala:249:11, :252:{35,39}]
wire [26:0] notCDom_sig = {_notCDom_sig_T, _notCDom_sig_T_3}; // @[MulAddRecFN.scala:251:{12,28}, :252:39]
assign _io_rawOut_sig_T = notCDom_sig; // @[MulAddRecFN.scala:251:12, :294:25]
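  // Descriptive note (not emitted by firtool): the two MSBs of notCDom_sig being zero
  // below marks complete cancellation of the non-carry-dominated sum (cf.
  // MulAddRecFN.scala:255); this term feeds io_rawOut_isZero further down.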
wire [1:0] _notCDom_completeCancellation_T = notCDom_sig[26:25]; // @[MulAddRecFN.scala:251:12, :255:21]
wire notCDom_completeCancellation = _notCDom_completeCancellation_T == 2'h0; // @[primitives.scala:103:54]
wire _io_rawOut_isZero_T_1 = notCDom_completeCancellation; // @[MulAddRecFN.scala:255:50, :283:42]
wire _notCDom_sign_T = io_fromPreMul_signProd_0 ^ notCDom_signSigSum; // @[MulAddRecFN.scala:169:7, :232:36, :259:36]
wire notCDom_sign = ~notCDom_completeCancellation & _notCDom_sign_T; // @[MulAddRecFN.scala:255:50, :257:12, :259:36]
wire _io_rawOut_sign_T_15 = notCDom_sign; // @[MulAddRecFN.scala:257:12, :292:17]
wire _GEN_0 = io_fromPreMul_isInfA_0 | io_fromPreMul_isInfB_0; // @[MulAddRecFN.scala:169:7, :264:49]
wire notNaN_isInfProd; // @[MulAddRecFN.scala:264:49]
assign notNaN_isInfProd = _GEN_0; // @[MulAddRecFN.scala:264:49]
wire _io_invalidExc_T_5; // @[MulAddRecFN.scala:275:36]
assign _io_invalidExc_T_5 = _GEN_0; // @[MulAddRecFN.scala:264:49, :275:36]
assign notNaN_isInfOut = notNaN_isInfProd; // @[MulAddRecFN.scala:264:49, :265:44]
assign io_rawOut_isInf_0 = notNaN_isInfOut; // @[MulAddRecFN.scala:169:7, :265:44]
wire _notNaN_addZeros_T = io_fromPreMul_isZeroA_0 | io_fromPreMul_isZeroB_0; // @[MulAddRecFN.scala:169:7, :267:32]
wire notNaN_addZeros = _notNaN_addZeros_T; // @[MulAddRecFN.scala:267:{32,58}]
wire _io_rawOut_sign_T_4 = notNaN_addZeros; // @[MulAddRecFN.scala:267:58, :287:26]
wire _io_invalidExc_T = io_fromPreMul_isInfA_0 & io_fromPreMul_isZeroB_0; // @[MulAddRecFN.scala:169:7, :272:31]
wire _io_invalidExc_T_1 = io_fromPreMul_isSigNaNAny_0 | _io_invalidExc_T; // @[MulAddRecFN.scala:169:7, :271:35, :272:31]
wire _io_invalidExc_T_2 = io_fromPreMul_isZeroA_0 & io_fromPreMul_isInfB_0; // @[MulAddRecFN.scala:169:7, :273:32]
wire _io_invalidExc_T_3 = _io_invalidExc_T_1 | _io_invalidExc_T_2; // @[MulAddRecFN.scala:271:35, :272:57, :273:32]
assign _io_invalidExc_T_9 = _io_invalidExc_T_3; // @[MulAddRecFN.scala:272:57, :273:57]
wire _io_invalidExc_T_4 = ~io_fromPreMul_isNaNAOrB_0; // @[MulAddRecFN.scala:169:7, :274:10]
wire _io_invalidExc_T_6 = _io_invalidExc_T_4 & _io_invalidExc_T_5; // @[MulAddRecFN.scala:274:{10,36}, :275:36]
assign io_invalidExc_0 = _io_invalidExc_T_9; // @[MulAddRecFN.scala:169:7, :273:57]
assign io_rawOut_isNaN_0 = _io_rawOut_isNaN_T; // @[MulAddRecFN.scala:169:7, :278:48]
assign _io_rawOut_isZero_T_2 = notNaN_addZeros | _io_rawOut_isZero_T_1; // @[MulAddRecFN.scala:267:58, :282:25, :283:42]
assign io_rawOut_isZero_0 = _io_rawOut_isZero_T_2; // @[MulAddRecFN.scala:169:7, :282:25]
wire _io_rawOut_sign_T = notNaN_isInfProd & io_fromPreMul_signProd_0; // @[MulAddRecFN.scala:169:7, :264:49, :285:27]
wire _io_rawOut_sign_T_2 = _io_rawOut_sign_T; // @[MulAddRecFN.scala:285:{27,54}]
wire _io_rawOut_sign_T_5 = _io_rawOut_sign_T_4 & io_fromPreMul_signProd_0; // @[MulAddRecFN.scala:169:7, :287:{26,48}]
wire _io_rawOut_sign_T_6 = _io_rawOut_sign_T_5 & opSignC; // @[MulAddRecFN.scala:190:42, :287:48, :288:36]
wire _io_rawOut_sign_T_7 = _io_rawOut_sign_T_2 | _io_rawOut_sign_T_6; // @[MulAddRecFN.scala:285:54, :286:43, :288:36]
wire _io_rawOut_sign_T_11 = _io_rawOut_sign_T_7; // @[MulAddRecFN.scala:286:43, :288:48]
wire _io_rawOut_sign_T_9 = io_fromPreMul_signProd_0 | opSignC; // @[MulAddRecFN.scala:169:7, :190:42, :290:37]
wire _io_rawOut_sign_T_12 = ~notNaN_isInfOut; // @[MulAddRecFN.scala:265:44, :291:10]
wire _io_rawOut_sign_T_13 = ~notNaN_addZeros; // @[MulAddRecFN.scala:267:58, :291:31]
wire _io_rawOut_sign_T_14 = _io_rawOut_sign_T_12 & _io_rawOut_sign_T_13; // @[MulAddRecFN.scala:291:{10,28,31}]
wire _io_rawOut_sign_T_16 = _io_rawOut_sign_T_14 & _io_rawOut_sign_T_15; // @[MulAddRecFN.scala:291:{28,49}, :292:17]
assign _io_rawOut_sign_T_17 = _io_rawOut_sign_T_11 | _io_rawOut_sign_T_16; // @[MulAddRecFN.scala:288:48, :290:50, :291:49]
assign io_rawOut_sign_0 = _io_rawOut_sign_T_17; // @[MulAddRecFN.scala:169:7, :290:50]
assign io_rawOut_sExp_0 = _io_rawOut_sExp_T; // @[MulAddRecFN.scala:169:7, :293:26]
assign io_rawOut_sig_0 = _io_rawOut_sig_T; // @[MulAddRecFN.scala:169:7, :294:25]
assign io_invalidExc = io_invalidExc_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_isNaN = io_rawOut_isNaN_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_isInf = io_rawOut_isInf_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_isZero = io_rawOut_isZero_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_sign = io_rawOut_sign_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_sExp = io_rawOut_sExp_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_sig = io_rawOut_sig_0; // @[MulAddRecFN.scala:169:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File InputUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{FlowRoutingBundle}
import constellation.noc.{HasNoCParams}
class AbstractInputUnitIO(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams],
)(implicit val p: Parameters) extends Bundle with HasRouterOutputParams {
val nodeId = cParam.destId
val router_req = Decoupled(new RouteComputerReq)
val router_resp = Input(new RouteComputerResp(outParams, egressParams))
val vcalloc_req = Decoupled(new VCAllocReq(cParam, outParams, egressParams))
val vcalloc_resp = Input(new VCAllocResp(outParams, egressParams))
val out_credit_available = Input(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) }))
val salloc_req = Vec(cParam.destSpeedup, Decoupled(new SwitchAllocReq(outParams, egressParams)))
val out = Vec(cParam.destSpeedup, Valid(new SwitchBundle(outParams, egressParams)))
val debug = Output(new Bundle {
val va_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
val sa_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
})
val block = Input(Bool())
}
abstract class AbstractInputUnit(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module with HasRouterOutputParams with HasNoCParams {
val nodeId = cParam.destId
def io: AbstractInputUnitIO
}
class InputBuffer(cParam: ChannelParams)(implicit p: Parameters) extends Module {
val nVirtualChannels = cParam.nVirtualChannels
val io = IO(new Bundle {
val enq = Flipped(Vec(cParam.srcSpeedup, Valid(new Flit(cParam.payloadBits))))
val deq = Vec(cParam.nVirtualChannels, Decoupled(new BaseFlit(cParam.payloadBits)))
})
val useOutputQueues = cParam.useOutputQueues
val delims = if (useOutputQueues) {
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize else 0).scanLeft(0)(_+_)
} else {
// If no queuing, have to add an additional slot since head == tail implies empty
// TODO this should be fixed, should use all slots available
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize + 1 else 0).scanLeft(0)(_+_)
}
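  // Worked example (hypothetical sizes, not from any real config): with three
  // traversable VCs of bufferSize 2, 3 and 1 and useOutputQueues = true,
  // scanLeft gives delims = Seq(0, 2, 5, 6), so starts = Seq(0, 2, 5),
  // ends = Seq(2, 5, 6) and fullSize = 6; each VC owns the half-open slot range
  // [starts(i), ends(i)) of the shared memory. Without output queues each
  // traversable VC gets one extra slot, since head == tail is read as empty.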
val starts = delims.dropRight(1).zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val ends = delims.tail.zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val fullSize = delims.last
// Ugly case. Use multiple queues
if ((cParam.srcSpeedup > 1 || cParam.destSpeedup > 1 || fullSize <= 1) || !cParam.unifiedBuffer) {
require(useOutputQueues)
val qs = cParam.virtualChannelParams.map(v => Module(new Queue(new BaseFlit(cParam.payloadBits), v.bufferSize)))
qs.zipWithIndex.foreach { case (q,i) =>
val sel = io.enq.map(f => f.valid && f.bits.virt_channel_id === i.U)
q.io.enq.valid := sel.orR
q.io.enq.bits.head := Mux1H(sel, io.enq.map(_.bits.head))
q.io.enq.bits.tail := Mux1H(sel, io.enq.map(_.bits.tail))
q.io.enq.bits.payload := Mux1H(sel, io.enq.map(_.bits.payload))
io.deq(i) <> q.io.deq
}
} else {
val mem = Mem(fullSize, new BaseFlit(cParam.payloadBits))
val heads = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val tails = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val empty = (heads zip tails).map(t => t._1 === t._2)
val qs = Seq.fill(nVirtualChannels) { Module(new Queue(new BaseFlit(cParam.payloadBits), 1, pipe=true)) }
qs.foreach(_.io.enq.valid := false.B)
qs.foreach(_.io.enq.bits := DontCare)
val vc_sel = UIntToOH(io.enq(0).bits.virt_channel_id)
val flit = Wire(new BaseFlit(cParam.payloadBits))
val direct_to_q = (Mux1H(vc_sel, qs.map(_.io.enq.ready)) && Mux1H(vc_sel, empty)) && useOutputQueues.B
flit.head := io.enq(0).bits.head
flit.tail := io.enq(0).bits.tail
flit.payload := io.enq(0).bits.payload
when (io.enq(0).valid && !direct_to_q) {
val tail = tails(io.enq(0).bits.virt_channel_id)
mem.write(tail, flit)
tails(io.enq(0).bits.virt_channel_id) := Mux(
tail === Mux1H(vc_sel, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(vc_sel, starts.map(_.U)),
tail + 1.U)
} .elsewhen (io.enq(0).valid && direct_to_q) {
for (i <- 0 until nVirtualChannels) {
when (io.enq(0).bits.virt_channel_id === i.U) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := flit
}
}
}
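    // Bypass path: when the target VC's region of the shared memory is empty and
    // its single-entry output queue can accept, the incoming flit is enqueued
    // directly into that queue instead of being written to the memory first.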
if (useOutputQueues) {
val can_to_q = (0 until nVirtualChannels).map { i => !empty(i) && qs(i).io.enq.ready }
val to_q_oh = PriorityEncoderOH(can_to_q)
val to_q = OHToUInt(to_q_oh)
when (can_to_q.orR) {
val head = Mux1H(to_q_oh, heads)
heads(to_q) := Mux(
head === Mux1H(to_q_oh, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(to_q_oh, starts.map(_.U)),
head + 1.U)
for (i <- 0 until nVirtualChannels) {
when (to_q_oh(i)) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := mem.read(head)
}
}
}
for (i <- 0 until nVirtualChannels) {
io.deq(i) <> qs(i).io.deq
}
} else {
qs.map(_.io.deq.ready := false.B)
val ready_sel = io.deq.map(_.ready)
val fire = io.deq.map(_.fire)
assert(PopCount(fire) <= 1.U)
val head = Mux1H(fire, heads)
when (fire.orR) {
val fire_idx = OHToUInt(fire)
heads(fire_idx) := Mux(
head === Mux1H(fire, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(fire, starts.map(_.U)),
head + 1.U)
}
val read_flit = mem.read(head)
for (i <- 0 until nVirtualChannels) {
io.deq(i).valid := !empty(i)
io.deq(i).bits := read_flit
}
}
}
}
class InputUnit(cParam: ChannelParams, outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean, combineSAST: Boolean
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
val nVirtualChannels = cParam.nVirtualChannels
val virtualChannelParams = cParam.virtualChannelParams
class InputUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(new Channel(cParam.asInstanceOf[ChannelParams]))
}
val io = IO(new InputUnitIO)
val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5)
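  // Per-VC state machine as used in this file: g_i = idle, g_r = waiting for
  // route computation, g_v = waiting for VC allocation, g_a = active (competing
  // for switch allocation until the tail flit departs, then back to g_i).
  // g_c is declared by the Enum but is not referenced in this unit.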
class InputState extends Bundle {
val g = UInt(3.W)
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
val flow = new FlowRoutingBundle
val fifo_deps = UInt(nVirtualChannels.W)
}
val input_buffer = Module(new InputBuffer(cParam))
for (i <- 0 until cParam.srcSpeedup) {
input_buffer.io.enq(i) := io.in.flit(i)
}
input_buffer.io.deq.foreach(_.ready := false.B)
val route_arbiter = Module(new Arbiter(
new RouteComputerReq, nVirtualChannels
))
io.router_req <> route_arbiter.io.out
val states = Reg(Vec(nVirtualChannels, new InputState))
val anyFifo = cParam.possibleFlows.map(_.fifo).reduce(_||_)
val allFifo = cParam.possibleFlows.map(_.fifo).reduce(_&&_)
if (anyFifo) {
val idle_mask = VecInit(states.map(_.g === g_i)).asUInt
for (s <- states)
for (i <- 0 until nVirtualChannels)
s.fifo_deps := s.fifo_deps & ~idle_mask
}
for (i <- 0 until cParam.srcSpeedup) {
when (io.in.flit(i).fire && io.in.flit(i).bits.head) {
val id = io.in.flit(i).bits.virt_channel_id
assert(id < nVirtualChannels.U)
assert(states(id).g === g_i)
val at_dest = io.in.flit(i).bits.flow.egress_node === nodeId.U
states(id).g := Mux(at_dest, g_v, g_r)
states(id).vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (o.U === io.in.flit(i).bits.flow.egress_node_id) {
states(id).vc_sel(o+nOutputs)(0) := true.B
}
}
states(id).flow := io.in.flit(i).bits.flow
if (anyFifo) {
val fifo = cParam.possibleFlows.filter(_.fifo).map(_.isFlow(io.in.flit(i).bits.flow)).toSeq.orR
states(id).fifo_deps := VecInit(states.zipWithIndex.map { case (s, j) =>
s.g =/= g_i && s.flow.asUInt === io.in.flit(i).bits.flow.asUInt && j.U =/= id
}).asUInt
}
}
}
(route_arbiter.io.in zip states).zipWithIndex.map { case ((i,s),idx) =>
if (virtualChannelParams(idx).traversable) {
i.valid := s.g === g_r
i.bits.flow := s.flow
i.bits.src_virt_id := idx.U
when (i.fire) { s.g := g_v }
} else {
i.valid := false.B
i.bits := DontCare
}
}
when (io.router_req.fire) {
val id = io.router_req.bits.src_virt_id
assert(states(id).g === g_r)
states(id).g := g_v
for (i <- 0 until nVirtualChannels) {
when (i.U === id) {
states(i).vc_sel := io.router_resp.vc_sel
}
}
}
val mask = RegInit(0.U(nVirtualChannels.W))
val vcalloc_reqs = Wire(Vec(nVirtualChannels, new VCAllocReq(cParam, outParams, egressParams)))
val vcalloc_vals = Wire(Vec(nVirtualChannels, Bool()))
val vcalloc_filter = PriorityEncoderOH(Cat(vcalloc_vals.asUInt, vcalloc_vals.asUInt & ~mask))
val vcalloc_sel = vcalloc_filter(nVirtualChannels-1,0) | (vcalloc_filter >> nVirtualChannels)
  // Prioritize incoming packets
when (io.router_req.fire) {
mask := (1.U << io.router_req.bits.src_virt_id) - 1.U
} .elsewhen (vcalloc_vals.orR) {
mask := Mux1H(vcalloc_sel, (0 until nVirtualChannels).map { w => ~(0.U((w+1).W)) })
}
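  // The doubled-width priority encode implements a rotating arbiter: the low
  // copy of vcalloc_vals is masked by ~mask (so VCs above the last winner are
  // preferred), while the unmasked high copy provides the wrap-around; folding
  // the two halves back together yields a one-hot grant. Hypothetical 4-VC
  // example: vals = b1010 with mask = b0011 grants VC 3, whereas vals = b0010
  // with the same mask finds nothing in the masked copy and wraps to grant
  // VC 1. On router_req.fire the mask is set to the bits below the incoming
  // packet's VC, giving the just-routed VC top priority.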
io.vcalloc_req.valid := vcalloc_vals.orR
io.vcalloc_req.bits := Mux1H(vcalloc_sel, vcalloc_reqs)
states.zipWithIndex.map { case (s,idx) =>
if (virtualChannelParams(idx).traversable) {
vcalloc_vals(idx) := s.g === g_v && s.fifo_deps === 0.U
vcalloc_reqs(idx).in_vc := idx.U
vcalloc_reqs(idx).vc_sel := s.vc_sel
vcalloc_reqs(idx).flow := s.flow
when (vcalloc_vals(idx) && vcalloc_sel(idx) && io.vcalloc_req.ready) { s.g := g_a }
if (combineRCVA) {
when (route_arbiter.io.in(idx).fire) {
vcalloc_vals(idx) := true.B
vcalloc_reqs(idx).vc_sel := io.router_resp.vc_sel
}
}
} else {
vcalloc_vals(idx) := false.B
vcalloc_reqs(idx) := DontCare
}
}
io.debug.va_stall := PopCount(vcalloc_vals) - io.vcalloc_req.ready
when (io.vcalloc_req.fire) {
for (i <- 0 until nVirtualChannels) {
when (vcalloc_sel(i)) {
states(i).vc_sel := io.vcalloc_resp.vc_sel
states(i).g := g_a
if (!combineRCVA) {
assert(states(i).g === g_v)
}
}
}
}
val salloc_arb = Module(new SwitchArbiter(
nVirtualChannels,
cParam.destSpeedup,
outParams, egressParams
))
(states zip salloc_arb.io.in).zipWithIndex.map { case ((s,r),i) =>
if (virtualChannelParams(i).traversable) {
val credit_available = (s.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
r.valid := s.g === g_a && credit_available && input_buffer.io.deq(i).valid
r.bits.vc_sel := s.vc_sel
val deq_tail = input_buffer.io.deq(i).bits.tail
r.bits.tail := deq_tail
when (r.fire && deq_tail) {
s.g := g_i
}
input_buffer.io.deq(i).ready := r.ready
} else {
r.valid := false.B
r.bits := DontCare
}
}
io.debug.sa_stall := PopCount(salloc_arb.io.in.map(r => r.valid && !r.ready))
io.salloc_req <> salloc_arb.io.out
when (io.block) {
salloc_arb.io.out.foreach(_.ready := false.B)
io.salloc_req.foreach(_.valid := false.B)
}
class OutBundle extends Bundle {
val valid = Bool()
val vid = UInt(virtualChannelBits.W)
val out_vid = UInt(log2Up(allOutParams.map(_.nVirtualChannels).max).W)
val flit = new Flit(cParam.payloadBits)
}
val salloc_outs = if (combineSAST) {
Wire(Vec(cParam.destSpeedup, new OutBundle))
} else {
Reg(Vec(cParam.destSpeedup, new OutBundle))
}
io.in.credit_return := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire, salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
io.in.vc_free := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire && Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)),
salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
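  // Each switch-arbiter grant returns one credit for the input VC it drained
  // (credit_return), and additionally frees that VC for reallocation (vc_free)
  // when the flit leaving the buffer is a tail.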
for (i <- 0 until cParam.destSpeedup) {
val salloc_out = salloc_outs(i)
salloc_out.valid := salloc_arb.io.out(i).fire
salloc_out.vid := OHToUInt(salloc_arb.io.chosen_oh(i))
val vc_sel = Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.vc_sel))
val channel_oh = vc_sel.map(_.reduce(_||_)).toSeq
val virt_channel = Mux1H(channel_oh, vc_sel.map(v => OHToUInt(v)).toSeq)
when (salloc_arb.io.out(i).fire) {
salloc_out.out_vid := virt_channel
salloc_out.flit.payload := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.payload))
salloc_out.flit.head := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.head))
salloc_out.flit.tail := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail))
salloc_out.flit.flow := Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.flow))
} .otherwise {
salloc_out.out_vid := DontCare
salloc_out.flit := DontCare
}
salloc_out.flit.virt_channel_id := DontCare // this gets set in the switch
io.out(i).valid := salloc_out.valid
io.out(i).bits.flit := salloc_out.flit
io.out(i).bits.out_virt_channel := salloc_out.out_vid
}
def filterVCSel(sel: MixedVec[Vec[Bool]], srcV: Int) = {
if (virtualChannelParams(srcV).traversable) {
outParams.zipWithIndex.map { case (oP, oI) =>
(0 until oP.nVirtualChannels).map { oV =>
var allow = false
virtualChannelParams(srcV).possibleFlows.foreach { pI =>
allow = allow || routingRelation(
cParam.channelRoutingInfos(srcV),
oP.channelRoutingInfos(oV),
pI
)
}
if (!allow)
sel(oI)(oV) := false.B
}
}
}
}
(0 until nVirtualChannels).map { i =>
if (!virtualChannelParams(i).traversable) states(i) := DontCare
filterVCSel(states(i).vc_sel, i)
}
when (reset.asBool) {
states.foreach(_.g := g_i)
}
}
| module InputUnit_1( // @[InputUnit.scala:158:7]
input clock, // @[InputUnit.scala:158:7]
input reset, // @[InputUnit.scala:158:7]
output [2:0] io_router_req_bits_src_virt_id, // @[InputUnit.scala:170:14]
output [1:0] io_router_req_bits_flow_vnet_id, // @[InputUnit.scala:170:14]
output [3:0] io_router_req_bits_flow_ingress_node, // @[InputUnit.scala:170:14]
output [1:0] io_router_req_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14]
output [3:0] io_router_req_bits_flow_egress_node, // @[InputUnit.scala:170:14]
output [1:0] io_router_req_bits_flow_egress_node_id, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_0, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_1, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_2, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_3, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_4, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_0_5, // @[InputUnit.scala:170:14]
input io_vcalloc_req_ready, // @[InputUnit.scala:170:14]
output io_vcalloc_req_valid, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_1_0, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_0, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_1, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_2, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_3, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_4, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_0_5, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_1_0, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_0, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_1, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_2, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_3, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_4, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_0_5, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_0, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_0, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_1, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_2, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_3, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_4, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_5, // @[InputUnit.scala:170:14]
input io_salloc_req_0_ready, // @[InputUnit.scala:170:14]
output io_salloc_req_0_valid, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_0, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_0, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_1, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_2, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_3, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_4, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_5, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_tail, // @[InputUnit.scala:170:14]
output io_out_0_valid, // @[InputUnit.scala:170:14]
output io_out_0_bits_flit_head, // @[InputUnit.scala:170:14]
output io_out_0_bits_flit_tail, // @[InputUnit.scala:170:14]
output [72:0] io_out_0_bits_flit_payload, // @[InputUnit.scala:170:14]
output [1:0] io_out_0_bits_flit_flow_vnet_id, // @[InputUnit.scala:170:14]
output [3:0] io_out_0_bits_flit_flow_ingress_node, // @[InputUnit.scala:170:14]
output [1:0] io_out_0_bits_flit_flow_ingress_node_id, // @[InputUnit.scala:170:14]
output [3:0] io_out_0_bits_flit_flow_egress_node, // @[InputUnit.scala:170:14]
output [1:0] io_out_0_bits_flit_flow_egress_node_id, // @[InputUnit.scala:170:14]
output [2:0] io_out_0_bits_out_virt_channel, // @[InputUnit.scala:170:14]
output [2:0] io_debug_va_stall, // @[InputUnit.scala:170:14]
output [2:0] io_debug_sa_stall, // @[InputUnit.scala:170:14]
input io_in_flit_0_valid, // @[InputUnit.scala:170:14]
input io_in_flit_0_bits_head, // @[InputUnit.scala:170:14]
input io_in_flit_0_bits_tail, // @[InputUnit.scala:170:14]
input [72:0] io_in_flit_0_bits_payload, // @[InputUnit.scala:170:14]
input [1:0] io_in_flit_0_bits_flow_vnet_id, // @[InputUnit.scala:170:14]
input [3:0] io_in_flit_0_bits_flow_ingress_node, // @[InputUnit.scala:170:14]
input [1:0] io_in_flit_0_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14]
input [3:0] io_in_flit_0_bits_flow_egress_node, // @[InputUnit.scala:170:14]
input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[InputUnit.scala:170:14]
input [2:0] io_in_flit_0_bits_virt_channel_id, // @[InputUnit.scala:170:14]
output [5:0] io_in_credit_return, // @[InputUnit.scala:170:14]
output [5:0] io_in_vc_free // @[InputUnit.scala:170:14]
);
wire vcalloc_vals_5; // @[InputUnit.scala:266:32]
wire vcalloc_vals_3; // @[InputUnit.scala:266:32]
wire vcalloc_vals_1; // @[InputUnit.scala:266:32]
wire vcalloc_vals_0; // @[InputUnit.scala:266:32]
wire _salloc_arb_io_in_0_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_1_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_3_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_5_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_out_0_valid; // @[InputUnit.scala:296:26]
wire [5:0] _salloc_arb_io_chosen_oh_0; // @[InputUnit.scala:296:26]
wire _route_arbiter_io_in_1_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_3_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_5_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_out_valid; // @[InputUnit.scala:187:29]
wire [2:0] _route_arbiter_io_out_bits_src_virt_id; // @[InputUnit.scala:187:29]
wire _input_buffer_io_deq_0_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_0_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_0_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_0_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_1_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_2_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_3_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_3_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_3_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_3_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_4_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_4_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_4_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_5_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_5_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_5_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_5_bits_payload; // @[InputUnit.scala:181:28]
reg [2:0] states_0_g; // @[InputUnit.scala:192:19]
reg states_0_vc_sel_1_0; // @[InputUnit.scala:192:19]
reg states_0_vc_sel_0_0; // @[InputUnit.scala:192:19]
reg [1:0] states_0_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [3:0] states_0_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_0_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [3:0] states_0_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_0_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_1_g; // @[InputUnit.scala:192:19]
reg states_1_vc_sel_1_0; // @[InputUnit.scala:192:19]
reg states_1_vc_sel_0_0; // @[InputUnit.scala:192:19]
reg states_1_vc_sel_0_1; // @[InputUnit.scala:192:19]
reg [1:0] states_1_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [3:0] states_1_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_1_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [3:0] states_1_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_1_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_3_g; // @[InputUnit.scala:192:19]
reg states_3_vc_sel_1_0; // @[InputUnit.scala:192:19]
reg states_3_vc_sel_0_2; // @[InputUnit.scala:192:19]
reg states_3_vc_sel_0_3; // @[InputUnit.scala:192:19]
reg [1:0] states_3_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [3:0] states_3_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_3_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [3:0] states_3_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_3_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_5_g; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_1_0; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_0_4; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_0_5; // @[InputUnit.scala:192:19]
reg [1:0] states_5_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [3:0] states_5_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_5_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [3:0] states_5_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_5_flow_egress_node_id; // @[InputUnit.scala:192:19]
wire _GEN = io_in_flit_0_valid & io_in_flit_0_bits_head; // @[InputUnit.scala:205:30]
wire route_arbiter_io_in_0_valid = states_0_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_1_valid = states_1_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_3_valid = states_3_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_5_valid = states_5_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
reg [5:0] mask; // @[InputUnit.scala:250:21]
wire [5:0] _vcalloc_filter_T_3 = {vcalloc_vals_5, 1'h0, vcalloc_vals_3, 1'h0, vcalloc_vals_1, vcalloc_vals_0} & ~mask; // @[InputUnit.scala:250:21, :253:{80,87,89}, :266:32]
wire [11:0] vcalloc_filter = _vcalloc_filter_T_3[0] ? 12'h1 : _vcalloc_filter_T_3[1] ? 12'h2 : _vcalloc_filter_T_3[2] ? 12'h4 : _vcalloc_filter_T_3[3] ? 12'h8 : _vcalloc_filter_T_3[4] ? 12'h10 : _vcalloc_filter_T_3[5] ? 12'h20 : vcalloc_vals_0 ? 12'h40 : vcalloc_vals_1 ? 12'h80 : vcalloc_vals_3 ? 12'h200 : {vcalloc_vals_5, 11'h0}; // @[OneHot.scala:85:71]
wire [5:0] vcalloc_sel = vcalloc_filter[5:0] | vcalloc_filter[11:6]; // @[Mux.scala:50:70]
wire io_vcalloc_req_valid_0 = vcalloc_vals_0 | vcalloc_vals_1 | vcalloc_vals_3 | vcalloc_vals_5; // @[package.scala:81:59]
assign vcalloc_vals_0 = states_0_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_1 = states_1_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_3 = states_3_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_5 = states_5_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
wire _GEN_0 = io_vcalloc_req_ready & io_vcalloc_req_valid_0; // @[Decoupled.scala:51:35]
wire _GEN_1 = _GEN_0 & vcalloc_sel[0]; // @[Mux.scala:32:36]
wire _GEN_2 = _GEN_0 & vcalloc_sel[1]; // @[Mux.scala:32:36]
wire _GEN_3 = _GEN_0 & vcalloc_sel[3]; // @[Mux.scala:32:36]
wire _GEN_4 = _GEN_0 & vcalloc_sel[5]; // @[Mux.scala:32:36] |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
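  // visible() passes unless some client owns this source ID and none of that
  // client's visibility address sets contain the address; it backs the
  // "address illegal for the specified bank visibility" checks below.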
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
    monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address=== address,"'B' channel address changed with multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
// This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minLatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minLatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
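// Illustrative sketch (not part of the original source): legalizeADSource tracks, per source ID,
// whether an A-channel request is outstanding, plus its opcode and size. Each per-source slot in
// inflight_opcodes / inflight_sizes stores ((value << 1) | 1), so an all-zero slot means "unset".
// For example, with sizeBits = 2 (a_size_bus_size = 3, log_a_size_bus_size = 2), a size-2 Get from
// source 5 sets bits (5 << 2) to (5 << 2) + 2 of inflight_sizes to ((2 << 1) | 1) = 5; the D-side
// lookup shifts that slot back down and drops the low "set" bit to recover the original size of 2.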
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minLatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
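// Usage sketch (illustrative, not part of the original source): DecoupledHelper collects the
// ready/valid terms of a multi-way handshake so each endpoint can compute "fire" while excluding
// its own term, e.g. for a hypothetical one-to-two fanout:
//   val helper = DecoupledHelper(in.valid, outA.ready, outB.ready)
//   in.ready   := helper.fire(in.valid)   // AND of every term except in.valid
//   outA.valid := helper.fire(outA.ready) // AND of every term except outA.ready
//   outB.valid := helper.fire(outB.ready)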
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
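// Illustrative example (not part of the original source): Str("OK") packs the ASCII bytes
// left-to-right into a 16-bit literal, i.e. 0x4F4B; Str(x, 16) renders a UInt x in hexadecimal
// for use with printf-style debug output.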
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
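// Usage sketch (illustrative, not part of the original source): Split slices a UInt at the given
// bit positions, returning the pieces most-significant first, e.g.
//   val (tag, index, offset) = Split(addr, 12, 6)  // addr(w-1,12), addr(11,6), addr(5,0)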
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
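// Illustrative example (not part of the original source): PopCountAtLeast(vec.asUInt, 2) is true
// when at least two bits are set, using a divide-and-conquer "one/two" reduction instead of a
// full PopCount comparison for the common n <= 2 cases.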
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
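// Usage sketch (illustrative, not part of the original source): a byte-lane write mask for a
// 4-byte beat could be produced as
//   val mask = MaskGen(addr(1, 0), lgSize, beatBytes = 4)
// where a full-beat access (lgSize = 2) drives all four lanes (mask 1111), matching the last
// example in the header comment above.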
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial block is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
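// Usage sketch (illustrative, not part of the original source):
//   val max_cycles = PlusArg("max_cycles", default = 0, docstring = "Stop after N cycles. Off if 0.")
//   when (max_cycles =/= 0.U) { assert(cycle_count < max_cycles, "cycle limit exceeded") }
// would read +max_cycles=N from the simulator command line, with cycle_count a hypothetical counter.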
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
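// Illustrative examples (not part of the original source), for a UInt x and a UInt y already
// known to be less than n:
//   x.addWrap(y, 12)  // (x + y) % 12 without a hardware divider
//   x.rotateLeft(3)   // static rotate; the UInt-amount overloads build log-depth mux chains
//   x.sextTo(64)      // sign-extend x to 64 bits (requires x.getWidth <= 64)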
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
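// Illustrative example (not part of the original source):
//   groupByIntoSeq(Seq(1, 2, 3, 4, 5))(_ % 2)
//   // => Seq(1 -> Seq(1, 3, 5), 0 -> Seq(2, 4)), with keys in first-seen order, unlike groupBy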
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
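// Illustrative note (not part of the original source): adResponse and bcResponse are indexed by a
// request opcode to get the expected response opcode, e.g. adResponse(TLMessages.Get) yields
// AccessAckData and bcResponse(TLMessages.Probe) yields ProbeAck; the monitor's responseMap tables
// mirror this structure.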
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
* (B)ranch: the agent is on an outwards path to the Trunk and may hold read-only (shared) permissions.
* (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
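// Illustrative note (not part of the original source): a master growing its copy issues an Acquire
// with a grow param such as NtoT, the slave answers with a Grant capped at toT, and a later
// Release reporting TtoN shrinks the copy back to None; isGrow/isCap/isShrink/isReport classify
// which of these encoding spaces a given param value belongs to.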
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.diplomacy.{
AddressDecoder, AddressSet, BufferParams, DirectedBuffers, IdMap, IdMapEntry,
IdRange, RegionType, TransferSizes
}
import freechips.rocketchip.resources.{Resource, ResourceAddress, ResourcePermissions}
import freechips.rocketchip.util.{
AsyncQueueParams, BundleField, BundleFieldBase, BundleKeyBase,
CreditedDelay, groupByIntoSeq, RationalDirection, SimpleProduct
}
import scala.math.max
// These transfer sizes describe requests issued by masters on the A channel that are responded to by slaves on the D channel
case class TLMasterToSlaveTransferSizes(
// Supports both Acquire+Release of the following two sizes:
acquireT: TransferSizes = TransferSizes.none,
acquireB: TransferSizes = TransferSizes.none,
arithmetic: TransferSizes = TransferSizes.none,
logical: TransferSizes = TransferSizes.none,
get: TransferSizes = TransferSizes.none,
putFull: TransferSizes = TransferSizes.none,
putPartial: TransferSizes = TransferSizes.none,
hint: TransferSizes = TransferSizes.none)
extends TLCommonTransferSizes {
def intersect(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes(
acquireT = acquireT .intersect(rhs.acquireT),
acquireB = acquireB .intersect(rhs.acquireB),
arithmetic = arithmetic.intersect(rhs.arithmetic),
logical = logical .intersect(rhs.logical),
get = get .intersect(rhs.get),
putFull = putFull .intersect(rhs.putFull),
putPartial = putPartial.intersect(rhs.putPartial),
hint = hint .intersect(rhs.hint))
def mincover(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes(
acquireT = acquireT .mincover(rhs.acquireT),
acquireB = acquireB .mincover(rhs.acquireB),
arithmetic = arithmetic.mincover(rhs.arithmetic),
logical = logical .mincover(rhs.logical),
get = get .mincover(rhs.get),
putFull = putFull .mincover(rhs.putFull),
putPartial = putPartial.mincover(rhs.putPartial),
hint = hint .mincover(rhs.hint))
// Reduce rendering to a simple yes/no per field
override def toString = {
def str(x: TransferSizes, flag: String) = if (x.none) "" else flag
def flags = Vector(
str(acquireT, "T"),
str(acquireB, "B"),
str(arithmetic, "A"),
str(logical, "L"),
str(get, "G"),
str(putFull, "F"),
str(putPartial, "P"),
str(hint, "H"))
flags.mkString
}
// Prints out the actual information in a user readable way
def infoString = {
s"""acquireT = ${acquireT}
|acquireB = ${acquireB}
|arithmetic = ${arithmetic}
|logical = ${logical}
|get = ${get}
|putFull = ${putFull}
|putPartial = ${putPartial}
|hint = ${hint}
|
|""".stripMargin
}
}
object TLMasterToSlaveTransferSizes {
def unknownEmits = TLMasterToSlaveTransferSizes(
acquireT = TransferSizes(1, 4096),
acquireB = TransferSizes(1, 4096),
arithmetic = TransferSizes(1, 4096),
logical = TransferSizes(1, 4096),
get = TransferSizes(1, 4096),
putFull = TransferSizes(1, 4096),
putPartial = TransferSizes(1, 4096),
hint = TransferSizes(1, 4096))
def unknownSupports = TLMasterToSlaveTransferSizes()
}
// These transfer sizes describe requests issued by slaves on the B channel that are responded to by masters on the C channel
case class TLSlaveToMasterTransferSizes(
probe: TransferSizes = TransferSizes.none,
arithmetic: TransferSizes = TransferSizes.none,
logical: TransferSizes = TransferSizes.none,
get: TransferSizes = TransferSizes.none,
putFull: TransferSizes = TransferSizes.none,
putPartial: TransferSizes = TransferSizes.none,
hint: TransferSizes = TransferSizes.none
) extends TLCommonTransferSizes {
def intersect(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes(
probe = probe .intersect(rhs.probe),
arithmetic = arithmetic.intersect(rhs.arithmetic),
logical = logical .intersect(rhs.logical),
get = get .intersect(rhs.get),
putFull = putFull .intersect(rhs.putFull),
putPartial = putPartial.intersect(rhs.putPartial),
hint = hint .intersect(rhs.hint)
)
def mincover(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes(
probe = probe .mincover(rhs.probe),
arithmetic = arithmetic.mincover(rhs.arithmetic),
logical = logical .mincover(rhs.logical),
get = get .mincover(rhs.get),
putFull = putFull .mincover(rhs.putFull),
putPartial = putPartial.mincover(rhs.putPartial),
hint = hint .mincover(rhs.hint)
)
// Reduce rendering to a simple yes/no per field
override def toString = {
def str(x: TransferSizes, flag: String) = if (x.none) "" else flag
def flags = Vector(
str(probe, "P"),
str(arithmetic, "A"),
str(logical, "L"),
str(get, "G"),
str(putFull, "F"),
str(putPartial, "P"),
str(hint, "H"))
flags.mkString
}
// Prints out the actual information in a user readable way
def infoString = {
s"""probe = ${probe}
|arithmetic = ${arithmetic}
|logical = ${logical}
|get = ${get}
|putFull = ${putFull}
|putPartial = ${putPartial}
|hint = ${hint}
|
|""".stripMargin
}
}
object TLSlaveToMasterTransferSizes {
def unknownEmits = TLSlaveToMasterTransferSizes(
arithmetic = TransferSizes(1, 4096),
logical = TransferSizes(1, 4096),
get = TransferSizes(1, 4096),
putFull = TransferSizes(1, 4096),
putPartial = TransferSizes(1, 4096),
hint = TransferSizes(1, 4096),
probe = TransferSizes(1, 4096))
def unknownSupports = TLSlaveToMasterTransferSizes()
}
trait TLCommonTransferSizes {
def arithmetic: TransferSizes
def logical: TransferSizes
def get: TransferSizes
def putFull: TransferSizes
def putPartial: TransferSizes
def hint: TransferSizes
}
class TLSlaveParameters private(
val nodePath: Seq[BaseNode],
val resources: Seq[Resource],
setName: Option[String],
val address: Seq[AddressSet],
val regionType: RegionType.T,
val executable: Boolean,
val fifoId: Option[Int],
val supports: TLMasterToSlaveTransferSizes,
val emits: TLSlaveToMasterTransferSizes,
// By default, slaves are forbidden from issuing 'denied' responses (it prevents Fragmentation)
val alwaysGrantsT: Boolean, // typically only true for CacheCork'd read-write devices; dual: neverReleaseData
// If fifoId=Some, all accesses sent to the same fifoId are executed and ACK'd in FIFO order
// Note: you can only rely on this FIFO behaviour if your TLMasterParameters include requestFifo
val mayDenyGet: Boolean, // applies to: AccessAckData, GrantData
val mayDenyPut: Boolean) // applies to: AccessAck, Grant, HintAck
// ReleaseAck may NEVER be denied
extends SimpleProduct
{
def sortedAddress = address.sorted
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlaveParameters]
override def productPrefix = "TLSlaveParameters"
// We intentionally omit nodePath for equality testing / formatting
def productArity: Int = 11
def productElement(n: Int): Any = n match {
case 0 => name
case 1 => address
case 2 => resources
case 3 => regionType
case 4 => executable
case 5 => fifoId
case 6 => supports
case 7 => emits
case 8 => alwaysGrantsT
case 9 => mayDenyGet
case 10 => mayDenyPut
case _ => throw new IndexOutOfBoundsException(n.toString)
}
def supportsAcquireT: TransferSizes = supports.acquireT
def supportsAcquireB: TransferSizes = supports.acquireB
def supportsArithmetic: TransferSizes = supports.arithmetic
def supportsLogical: TransferSizes = supports.logical
def supportsGet: TransferSizes = supports.get
def supportsPutFull: TransferSizes = supports.putFull
def supportsPutPartial: TransferSizes = supports.putPartial
def supportsHint: TransferSizes = supports.hint
require (!address.isEmpty, "Address cannot be empty")
address.foreach { a => require (a.finite, "Address must be finite") }
address.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") }
require (supportsPutFull.contains(supportsPutPartial), s"PutFull($supportsPutFull) < PutPartial($supportsPutPartial)")
require (supportsPutFull.contains(supportsArithmetic), s"PutFull($supportsPutFull) < Arithmetic($supportsArithmetic)")
require (supportsPutFull.contains(supportsLogical), s"PutFull($supportsPutFull) < Logical($supportsLogical)")
require (supportsGet.contains(supportsArithmetic), s"Get($supportsGet) < Arithmetic($supportsArithmetic)")
require (supportsGet.contains(supportsLogical), s"Get($supportsGet) < Logical($supportsLogical)")
require (supportsAcquireB.contains(supportsAcquireT), s"AcquireB($supportsAcquireB) < AcquireT($supportsAcquireT)")
require (!alwaysGrantsT || supportsAcquireT, s"Must supportAcquireT if promising to always grantT")
// Make sure that the regionType agrees with the capabilities
require (!supportsAcquireB || regionType >= RegionType.UNCACHED) // acquire -> uncached, tracked, cached
require (regionType <= RegionType.UNCACHED || supportsAcquireB) // tracked, cached -> acquire
require (regionType != RegionType.UNCACHED || supportsGet) // uncached -> supportsGet
val name = setName.orElse(nodePath.lastOption.map(_.lazyModule.name)).getOrElse("disconnected")
val maxTransfer = List( // Largest supported transfer of all types
supportsAcquireT.max,
supportsAcquireB.max,
supportsArithmetic.max,
supportsLogical.max,
supportsGet.max,
supportsPutFull.max,
supportsPutPartial.max).max
val maxAddress = address.map(_.max).max
val minAlignment = address.map(_.alignment).min
// The device had better not support a transfer larger than its alignment
require (minAlignment >= maxTransfer, s"Bad $address: minAlignment ($minAlignment) must be >= maxTransfer ($maxTransfer)")
def toResource: ResourceAddress = {
ResourceAddress(address, ResourcePermissions(
r = supportsAcquireB || supportsGet,
w = supportsAcquireT || supportsPutFull,
x = executable,
c = supportsAcquireB,
a = supportsArithmetic && supportsLogical))
}
def findTreeViolation() = nodePath.find {
case _: MixedAdapterNode[_, _, _, _, _, _, _, _] => false
case _: SinkNode[_, _, _, _, _] => false
case node => node.inputs.size != 1
}
def isTree = findTreeViolation() == None
def infoString = {
s"""Slave Name = ${name}
|Slave Address = ${address}
|supports = ${supports.infoString}
|
|""".stripMargin
}
def v1copy(
address: Seq[AddressSet] = address,
resources: Seq[Resource] = resources,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
nodePath: Seq[BaseNode] = nodePath,
supportsAcquireT: TransferSizes = supports.acquireT,
supportsAcquireB: TransferSizes = supports.acquireB,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut,
alwaysGrantsT: Boolean = alwaysGrantsT,
fifoId: Option[Int] = fifoId) =
{
new TLSlaveParameters(
setName = setName,
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supports = TLMasterToSlaveTransferSizes(
acquireT = supportsAcquireT,
acquireB = supportsAcquireB,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = emits,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
def v2copy(
nodePath: Seq[BaseNode] = nodePath,
resources: Seq[Resource] = resources,
name: Option[String] = setName,
address: Seq[AddressSet] = address,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
fifoId: Option[Int] = fifoId,
supports: TLMasterToSlaveTransferSizes = supports,
emits: TLSlaveToMasterTransferSizes = emits,
alwaysGrantsT: Boolean = alwaysGrantsT,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut) =
{
new TLSlaveParameters(
nodePath = nodePath,
resources = resources,
setName = name,
address = address,
regionType = regionType,
executable = executable,
fifoId = fifoId,
supports = supports,
emits = emits,
alwaysGrantsT = alwaysGrantsT,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut)
}
@deprecated("Use v1copy instead of copy","")
def copy(
address: Seq[AddressSet] = address,
resources: Seq[Resource] = resources,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
nodePath: Seq[BaseNode] = nodePath,
supportsAcquireT: TransferSizes = supports.acquireT,
supportsAcquireB: TransferSizes = supports.acquireB,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut,
alwaysGrantsT: Boolean = alwaysGrantsT,
fifoId: Option[Int] = fifoId) =
{
v1copy(
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supportsAcquireT = supportsAcquireT,
supportsAcquireB = supportsAcquireB,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
}
object TLSlaveParameters {
def v1(
address: Seq[AddressSet],
resources: Seq[Resource] = Seq(),
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
nodePath: Seq[BaseNode] = Seq(),
supportsAcquireT: TransferSizes = TransferSizes.none,
supportsAcquireB: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false,
alwaysGrantsT: Boolean = false,
fifoId: Option[Int] = None) =
{
new TLSlaveParameters(
setName = None,
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supports = TLMasterToSlaveTransferSizes(
acquireT = supportsAcquireT,
acquireB = supportsAcquireB,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = TLSlaveToMasterTransferSizes.unknownEmits,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
def v2(
address: Seq[AddressSet],
nodePath: Seq[BaseNode] = Seq(),
resources: Seq[Resource] = Seq(),
name: Option[String] = None,
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
fifoId: Option[Int] = None,
supports: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownSupports,
emits: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownEmits,
alwaysGrantsT: Boolean = false,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false) =
{
new TLSlaveParameters(
nodePath = nodePath,
resources = resources,
setName = name,
address = address,
regionType = regionType,
executable = executable,
fifoId = fifoId,
supports = supports,
emits = emits,
alwaysGrantsT = alwaysGrantsT,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut)
}
}
object TLManagerParameters {
@deprecated("Use TLSlaveParameters.v1 instead of TLManagerParameters","")
def apply(
address: Seq[AddressSet],
resources: Seq[Resource] = Seq(),
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
nodePath: Seq[BaseNode] = Seq(),
supportsAcquireT: TransferSizes = TransferSizes.none,
supportsAcquireB: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false,
alwaysGrantsT: Boolean = false,
fifoId: Option[Int] = None) =
TLSlaveParameters.v1(
address,
resources,
regionType,
executable,
nodePath,
supportsAcquireT,
supportsAcquireB,
supportsArithmetic,
supportsLogical,
supportsGet,
supportsPutFull,
supportsPutPartial,
supportsHint,
mayDenyGet,
mayDenyPut,
alwaysGrantsT,
fifoId,
)
}
case class TLChannelBeatBytes(a: Option[Int], b: Option[Int], c: Option[Int], d: Option[Int])
{
def members = Seq(a, b, c, d)
members.collect { case Some(beatBytes) =>
require (isPow2(beatBytes), "Data channel width must be a power of 2")
}
}
object TLChannelBeatBytes{
def apply(beatBytes: Int): TLChannelBeatBytes = TLChannelBeatBytes(
Some(beatBytes),
Some(beatBytes),
Some(beatBytes),
Some(beatBytes))
def apply(): TLChannelBeatBytes = TLChannelBeatBytes(
None,
None,
None,
None)
}
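// Illustrative note (editor's addition, not part of the original source): TLChannelBeatBytes(8)
// declares an 8-byte data width on all four data channels (A, B, C, D), while TLChannelBeatBytes()
// leaves every width unspecified, as the master-port constructors below do. For example,
// TLChannelBeatBytes(8).members == Seq(Some(8), Some(8), Some(8), Some(8)), whereas a
// non-power-of-2 width such as TLChannelBeatBytes(6) fails the isPow2 requirement at construction.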
class TLSlavePortParameters private(
val slaves: Seq[TLSlaveParameters],
val channelBytes: TLChannelBeatBytes,
val endSinkId: Int,
val minLatency: Int,
val responseFields: Seq[BundleFieldBase],
val requestKeys: Seq[BundleKeyBase]) extends SimpleProduct
{
def sortedSlaves = slaves.sortBy(_.sortedAddress.head)
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlavePortParameters]
override def productPrefix = "TLSlavePortParameters"
def productArity: Int = 6
def productElement(n: Int): Any = n match {
case 0 => slaves
case 1 => channelBytes
case 2 => endSinkId
case 3 => minLatency
case 4 => responseFields
case 5 => requestKeys
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!slaves.isEmpty, "Slave ports must have slaves")
require (endSinkId >= 0, "Sink ids cannot be negative")
require (minLatency >= 0, "Minimum required latency cannot be negative")
// Using this API implies you cannot handle mixed-width busses
def beatBytes = {
channelBytes.members.foreach { width =>
require (width.isDefined && width == channelBytes.a)
}
channelBytes.a.get
}
// TODO this should be deprecated
def managers = slaves
def requireFifo(policy: TLFIFOFixer.Policy = TLFIFOFixer.allFIFO) = {
val relevant = slaves.filter(m => policy(m))
relevant.foreach { m =>
require(m.fifoId == relevant.head.fifoId, s"${m.name} had fifoId ${m.fifoId}, which was not homogeneous (${slaves.map(s => (s.name, s.fifoId))}) ")
}
}
// Bounds on required sizes
def maxAddress = slaves.map(_.maxAddress).max
def maxTransfer = slaves.map(_.maxTransfer).max
def mayDenyGet = slaves.exists(_.mayDenyGet)
def mayDenyPut = slaves.exists(_.mayDenyPut)
// Diplomatically determined operation sizes emitted by all outward Slaves
// as opposed to the emits* methods, which generate circuitry to check specific addresses
val allEmitClaims = slaves.map(_.emits).reduce( _ intersect _)
// Operation sizes emitted by at least one outward Slave,
// as opposed to the emits* methods, which generate circuitry to check specific addresses
val anyEmitClaims = slaves.map(_.emits).reduce(_ mincover _)
// Diplomatically determined operation sizes supported by all outward Slaves
// as opposed to the supports* methods, which generate circuitry to check specific addresses
val allSupportClaims = slaves.map(_.supports).reduce( _ intersect _)
val allSupportAcquireT = allSupportClaims.acquireT
val allSupportAcquireB = allSupportClaims.acquireB
val allSupportArithmetic = allSupportClaims.arithmetic
val allSupportLogical = allSupportClaims.logical
val allSupportGet = allSupportClaims.get
val allSupportPutFull = allSupportClaims.putFull
val allSupportPutPartial = allSupportClaims.putPartial
val allSupportHint = allSupportClaims.hint
// Operation sizes supported by at least one outward Slave,
// as opposed to the supports* methods, which generate circuitry to check specific addresses
val anySupportClaims = slaves.map(_.supports).reduce(_ mincover _)
val anySupportAcquireT = !anySupportClaims.acquireT.none
val anySupportAcquireB = !anySupportClaims.acquireB.none
val anySupportArithmetic = !anySupportClaims.arithmetic.none
val anySupportLogical = !anySupportClaims.logical.none
val anySupportGet = !anySupportClaims.get.none
val anySupportPutFull = !anySupportClaims.putFull.none
val anySupportPutPartial = !anySupportClaims.putPartial.none
val anySupportHint = !anySupportClaims.hint.none
// Supporting Acquire means being routable for GrantAck
require ((endSinkId == 0) == !anySupportAcquireB)
// These return Option[TLSlaveParameters] for your convenience
def find(address: BigInt) = slaves.find(_.address.exists(_.contains(address)))
// The safe version will check the entire address
def findSafe(address: UInt) = VecInit(sortedSlaves.map(_.address.map(_.contains(address)).reduce(_ || _)))
// The fast version assumes the address is valid (you probably want fastProperty instead of this function)
def findFast(address: UInt) = {
val routingMask = AddressDecoder(slaves.map(_.address))
VecInit(sortedSlaves.map(_.address.map(_.widen(~routingMask)).distinct.map(_.contains(address)).reduce(_ || _)))
}
// Compute the simplest AddressSets that decide a key
def fastPropertyGroup[K](p: TLSlaveParameters => K): Seq[(K, Seq[AddressSet])] = {
val groups = groupByIntoSeq(sortedSlaves.map(m => (p(m), m.address)))( _._1).map { case (k, vs) =>
k -> vs.flatMap(_._2)
}
val reductionMask = AddressDecoder(groups.map(_._2))
groups.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~reductionMask)).distinct) }
}
// Select a property
def fastProperty[K, D <: Data](address: UInt, p: TLSlaveParameters => K, d: K => D): D =
Mux1H(fastPropertyGroup(p).map { case (v, a) => (a.map(_.contains(address)).reduce(_||_), d(v)) })
// Note: returns the actual fifoId + 1 or 0 if None
def findFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.map(_+1).getOrElse(0), (i:Int) => i.U)
def hasFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.isDefined, (b:Boolean) => b.B)
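// Usage sketch (editor's illustration, not original source): fastProperty selects any per-slave
// property by address using the simplest possible decode. Assuming a UInt `addr` on the wire:
//   val isExec = fastProperty(addr, _.executable, (b: Boolean) => b.B)
//   val fifo   = findFifoIdFast(addr) // fifoId + 1, or 0.U when the owning slave has no fifoId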
// Does this Port manage this ID/address?
def containsSafe(address: UInt) = findSafe(address).reduce(_ || _)
private def addressHelper(
// setting safe to false indicates that all addresses are expected to be legal, which might reduce circuit complexity
safe: Boolean,
// member filters out the sizes being checked based on the opcode being emitted or supported
member: TLSlaveParameters => TransferSizes,
address: UInt,
lgSize: UInt,
// range provides a limit on the sizes that are expected to be evaluated, which might reduce circuit complexity
range: Option[TransferSizes]): Bool = {
// trim reduces circuit complexity by intersecting checked sizes with the range argument
def trim(x: TransferSizes) = range.map(_.intersect(x)).getOrElse(x)
// groupBy returns an unordered map, so we convert back to a Seq and sort for determinism.
// groupByIntoSeq groups the slaves by the trimmed transfer sizes they support (or emit) for the
// opcode selected by `member`: membership is decided both by whether a slave handles that opcode
// and by which sizes it handles, after intersecting those sizes with the requested range.
// supportCases therefore maps each transfer size not to a list of slaves but to every address
// set served at that size; a single size may be backed by several address ranges.
// Safety is a trade-off: the safe version checks against all possible addresses, which costs a
// larger circuit but gives the right answer even for an illegal address, while the fast (not
// safe) version is cheaper but presumes the address is legal and may answer incorrectly otherwise.
val supportCases = groupByIntoSeq(slaves)(m => trim(member(m))).map { case (k: TransferSizes, vs: Seq[TLSlaveParameters]) =>
k -> vs.flatMap(_.address)
}
// safe produces a circuit that compares against all possible addresses,
// whereas fast presumes that the address is legal but uses an efficient address decoder
val mask = if (safe) ~BigInt(0) else AddressDecoder(supportCases.map(_._2))
// simplified creates the most concise possible representation of each case's address sets based on the mask.
val simplified = supportCases.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~mask)).distinct) }
simplified.map { case (s, a) =>
// For each case, the size matches when s equals the requested range exactly or contains lgSize,
// and the address matches when any of that case's address sets contains it; we then OR-reduce
// across all cases, so the result is true iff some case covers both the dynamic size and address.
((Some(s) == range).B || s.containsLg(lgSize)) &&
a.map(_.contains(address)).reduce(_||_)
}.foldLeft(false.B)(_||_)
}
def supportsAcquireTSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireT, address, lgSize, range)
def supportsAcquireBSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireB, address, lgSize, range)
def supportsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.arithmetic, address, lgSize, range)
def supportsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.logical, address, lgSize, range)
def supportsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.get, address, lgSize, range)
def supportsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putFull, address, lgSize, range)
def supportsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putPartial, address, lgSize, range)
def supportsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.hint, address, lgSize, range)
def supportsAcquireTFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireT, address, lgSize, range)
def supportsAcquireBFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireB, address, lgSize, range)
def supportsArithmeticFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.arithmetic, address, lgSize, range)
def supportsLogicalFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.logical, address, lgSize, range)
def supportsGetFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.get, address, lgSize, range)
def supportsPutFullFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putFull, address, lgSize, range)
def supportsPutPartialFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putPartial, address, lgSize, range)
def supportsHintFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.hint, address, lgSize, range)
def emitsProbeSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.probe, address, lgSize, range)
def emitsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.arithmetic, address, lgSize, range)
def emitsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.logical, address, lgSize, range)
def emitsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.get, address, lgSize, range)
def emitsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putFull, address, lgSize, range)
def emitsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putPartial, address, lgSize, range)
def emitsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.hint, address, lgSize, range)
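// Usage sketch (editor's illustration, not original source): these generated checks are the
// circuit-level form of addressHelper above. Assuming this slave port and an A-channel beat `a`:
//   val legal = supportsGetSafe(a.address, a.size)
// answers "does the slave owning a.address support a Get of 2^a.size bytes?". The Fast variants
// assume a.address is legal and use a cheaper decoder, and the optional `range` argument restricts
// the sizes that are checked, further shrinking the circuit.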
def findTreeViolation() = slaves.flatMap(_.findTreeViolation()).headOption
def isTree = !slaves.exists(!_.isTree)
def infoString = "Slave Port Beatbytes = " + beatBytes + "\n" + "Slave Port MinLatency = " + minLatency + "\n\n" + slaves.map(_.infoString).mkString
def v1copy(
managers: Seq[TLSlaveParameters] = slaves,
beatBytes: Int = -1,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
new TLSlavePortParameters(
slaves = managers,
channelBytes = if (beatBytes != -1) TLChannelBeatBytes(beatBytes) else channelBytes,
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
def v2copy(
slaves: Seq[TLSlaveParameters] = slaves,
channelBytes: TLChannelBeatBytes = channelBytes,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
new TLSlavePortParameters(
slaves = slaves,
channelBytes = channelBytes,
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
@deprecated("Use v1copy instead of copy","")
def copy(
managers: Seq[TLSlaveParameters] = slaves,
beatBytes: Int = -1,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
v1copy(
managers,
beatBytes,
endSinkId,
minLatency,
responseFields,
requestKeys)
}
}
object TLSlavePortParameters {
def v1(
managers: Seq[TLSlaveParameters],
beatBytes: Int,
endSinkId: Int = 0,
minLatency: Int = 0,
responseFields: Seq[BundleFieldBase] = Nil,
requestKeys: Seq[BundleKeyBase] = Nil) =
{
new TLSlavePortParameters(
slaves = managers,
channelBytes = TLChannelBeatBytes(beatBytes),
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
}
object TLManagerPortParameters {
@deprecated("Use TLSlavePortParameters.v1 instead of TLManagerPortParameters","")
def apply(
managers: Seq[TLSlaveParameters],
beatBytes: Int,
endSinkId: Int = 0,
minLatency: Int = 0,
responseFields: Seq[BundleFieldBase] = Nil,
requestKeys: Seq[BundleKeyBase] = Nil) =
{
TLSlavePortParameters.v1(
managers,
beatBytes,
endSinkId,
minLatency,
responseFields,
requestKeys)
}
}
class TLMasterParameters private(
val nodePath: Seq[BaseNode],
val resources: Seq[Resource],
val name: String,
val visibility: Seq[AddressSet],
val unusedRegionTypes: Set[RegionType.T],
val executesOnly: Boolean,
val requestFifo: Boolean, // only a request, not a requirement. applies to A, not C.
val supports: TLSlaveToMasterTransferSizes,
val emits: TLMasterToSlaveTransferSizes,
val neverReleasesData: Boolean,
val sourceId: IdRange) extends SimpleProduct
{
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterParameters]
override def productPrefix = "TLMasterParameters"
// We intentionally omit nodePath for equality testing / formatting
def productArity: Int = 10
def productElement(n: Int): Any = n match {
case 0 => name
case 1 => sourceId
case 2 => resources
case 3 => visibility
case 4 => unusedRegionTypes
case 5 => executesOnly
case 6 => requestFifo
case 7 => supports
case 8 => emits
case 9 => neverReleasesData
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!sourceId.isEmpty)
require (!visibility.isEmpty)
require (supports.putFull.contains(supports.putPartial))
// We only support these operations if we support Probe (ie: we're a cache)
require (supports.probe.contains(supports.arithmetic))
require (supports.probe.contains(supports.logical))
require (supports.probe.contains(supports.get))
require (supports.probe.contains(supports.putFull))
require (supports.probe.contains(supports.putPartial))
require (supports.probe.contains(supports.hint))
visibility.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") }
val maxTransfer = List(
supports.probe.max,
supports.arithmetic.max,
supports.logical.max,
supports.get.max,
supports.putFull.max,
supports.putPartial.max).max
def infoString = {
s"""Master Name = ${name}
|visibility = ${visibility}
|emits = ${emits.infoString}
|sourceId = ${sourceId}
|
|""".stripMargin
}
def v1copy(
name: String = name,
sourceId: IdRange = sourceId,
nodePath: Seq[BaseNode] = nodePath,
requestFifo: Boolean = requestFifo,
visibility: Seq[AddressSet] = visibility,
supportsProbe: TransferSizes = supports.probe,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = this.resources,
name = name,
visibility = visibility,
unusedRegionTypes = this.unusedRegionTypes,
executesOnly = this.executesOnly,
requestFifo = requestFifo,
supports = TLSlaveToMasterTransferSizes(
probe = supportsProbe,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = this.emits,
neverReleasesData = this.neverReleasesData,
sourceId = sourceId)
}
def v2copy(
nodePath: Seq[BaseNode] = nodePath,
resources: Seq[Resource] = resources,
name: String = name,
visibility: Seq[AddressSet] = visibility,
unusedRegionTypes: Set[RegionType.T] = unusedRegionTypes,
executesOnly: Boolean = executesOnly,
requestFifo: Boolean = requestFifo,
supports: TLSlaveToMasterTransferSizes = supports,
emits: TLMasterToSlaveTransferSizes = emits,
neverReleasesData: Boolean = neverReleasesData,
sourceId: IdRange = sourceId) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = resources,
name = name,
visibility = visibility,
unusedRegionTypes = unusedRegionTypes,
executesOnly = executesOnly,
requestFifo = requestFifo,
supports = supports,
emits = emits,
neverReleasesData = neverReleasesData,
sourceId = sourceId)
}
@deprecated("Use v1copy instead of copy","")
def copy(
name: String = name,
sourceId: IdRange = sourceId,
nodePath: Seq[BaseNode] = nodePath,
requestFifo: Boolean = requestFifo,
visibility: Seq[AddressSet] = visibility,
supportsProbe: TransferSizes = supports.probe,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint) =
{
v1copy(
name = name,
sourceId = sourceId,
nodePath = nodePath,
requestFifo = requestFifo,
visibility = visibility,
supportsProbe = supportsProbe,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint)
}
}
object TLMasterParameters {
def v1(
name: String,
sourceId: IdRange = IdRange(0,1),
nodePath: Seq[BaseNode] = Seq(),
requestFifo: Boolean = false,
visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)),
supportsProbe: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = Nil,
name = name,
visibility = visibility,
unusedRegionTypes = Set(),
executesOnly = false,
requestFifo = requestFifo,
supports = TLSlaveToMasterTransferSizes(
probe = supportsProbe,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = TLMasterToSlaveTransferSizes.unknownEmits,
neverReleasesData = false,
sourceId = sourceId)
}
def v2(
nodePath: Seq[BaseNode] = Seq(),
resources: Seq[Resource] = Nil,
name: String,
visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)),
unusedRegionTypes: Set[RegionType.T] = Set(),
executesOnly: Boolean = false,
requestFifo: Boolean = false,
supports: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownSupports,
emits: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownEmits,
neverReleasesData: Boolean = false,
sourceId: IdRange = IdRange(0,1)) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = resources,
name = name,
visibility = visibility,
unusedRegionTypes = unusedRegionTypes,
executesOnly = executesOnly,
requestFifo = requestFifo,
supports = supports,
emits = emits,
neverReleasesData = neverReleasesData,
sourceId = sourceId)
}
}
object TLClientParameters {
@deprecated("Use TLMasterParameters.v1 instead of TLClientParameters","")
def apply(
name: String,
sourceId: IdRange = IdRange(0,1),
nodePath: Seq[BaseNode] = Seq(),
requestFifo: Boolean = false,
visibility: Seq[AddressSet] = Seq(AddressSet.everything),
supportsProbe: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none) =
{
TLMasterParameters.v1(
name = name,
sourceId = sourceId,
nodePath = nodePath,
requestFifo = requestFifo,
visibility = visibility,
supportsProbe = supportsProbe,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint)
}
}
class TLMasterPortParameters private(
val masters: Seq[TLMasterParameters],
val channelBytes: TLChannelBeatBytes,
val minLatency: Int,
val echoFields: Seq[BundleFieldBase],
val requestFields: Seq[BundleFieldBase],
val responseKeys: Seq[BundleKeyBase]) extends SimpleProduct
{
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterPortParameters]
override def productPrefix = "TLMasterPortParameters"
def productArity: Int = 6
def productElement(n: Int): Any = n match {
case 0 => masters
case 1 => channelBytes
case 2 => minLatency
case 3 => echoFields
case 4 => requestFields
case 5 => responseKeys
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!masters.isEmpty)
require (minLatency >= 0)
def clients = masters
// Require disjoint ranges for Ids
IdRange.overlaps(masters.map(_.sourceId)).foreach { case (x, y) =>
require (!x.overlaps(y), s"TLClientParameters.sourceId ${x} overlaps ${y}")
}
// Bounds on required sizes
def endSourceId = masters.map(_.sourceId.end).max
def maxTransfer = masters.map(_.maxTransfer).max
// The unused sources < endSourceId
def unusedSources: Seq[Int] = {
val usedSources = masters.map(_.sourceId).sortBy(_.start)
((Seq(0) ++ usedSources.map(_.end)) zip usedSources.map(_.start)) flatMap { case (end, start) =>
end until start
}
}
// Diplomatically determined operation sizes emitted by all inward Masters
// as opposed to the emits* methods, which generate circuitry to check specific addresses
val allEmitClaims = masters.map(_.emits).reduce( _ intersect _)
// Diplomatically determined operation sizes emitted by at least one inward Master,
// as opposed to the emits* methods, which generate circuitry to check specific addresses
val anyEmitClaims = masters.map(_.emits).reduce(_ mincover _)
// Diplomatically determined operation sizes supported by all inward Masters
// as opposed to the supports* methods, which generate circuitry to check specific addresses
val allSupportProbe = masters.map(_.supports.probe) .reduce(_ intersect _)
val allSupportArithmetic = masters.map(_.supports.arithmetic).reduce(_ intersect _)
val allSupportLogical = masters.map(_.supports.logical) .reduce(_ intersect _)
val allSupportGet = masters.map(_.supports.get) .reduce(_ intersect _)
val allSupportPutFull = masters.map(_.supports.putFull) .reduce(_ intersect _)
val allSupportPutPartial = masters.map(_.supports.putPartial).reduce(_ intersect _)
val allSupportHint = masters.map(_.supports.hint) .reduce(_ intersect _)
// Diplomatically determined operation sizes supported by at least one master
// as opposed to the supports* methods, which generate circuitry to check specific addresses
val anySupportProbe = masters.map(!_.supports.probe.none) .reduce(_ || _)
val anySupportArithmetic = masters.map(!_.supports.arithmetic.none).reduce(_ || _)
val anySupportLogical = masters.map(!_.supports.logical.none) .reduce(_ || _)
val anySupportGet = masters.map(!_.supports.get.none) .reduce(_ || _)
val anySupportPutFull = masters.map(!_.supports.putFull.none) .reduce(_ || _)
val anySupportPutPartial = masters.map(!_.supports.putPartial.none).reduce(_ || _)
val anySupportHint = masters.map(!_.supports.hint.none) .reduce(_ || _)
// These return Option[TLMasterParameters] for your convenience
def find(id: Int) = masters.find(_.sourceId.contains(id))
// Synthesizable lookup methods
def find(id: UInt) = VecInit(masters.map(_.sourceId.contains(id)))
def contains(id: UInt) = find(id).reduce(_ || _)
def requestFifo(id: UInt) = Mux1H(find(id), masters.map(c => c.requestFifo.B))
// Available during RTL runtime, checks to see if (id, size) is supported by the master's (client's) diplomatic parameters
private def sourceIdHelper(member: TLMasterParameters => TransferSizes)(id: UInt, lgSize: UInt) = {
val allSame = masters.map(member(_) == member(masters(0))).reduce(_ && _)
// This if statement is a coarse generalization of the groupBy in the sourceIdHelper2 version:
// it handles the special case where there is only one group.
if (allSame) member(masters(0)).containsLg(lgSize) else {
// Find the master associated with the ID and return whether that particular master is able to receive a transaction of lgSize
Mux1H(find(id), masters.map(member(_).containsLg(lgSize)))
}
}
// Check for support of a given operation at a specific id
val supportsProbe = sourceIdHelper(_.supports.probe) _
val supportsArithmetic = sourceIdHelper(_.supports.arithmetic) _
val supportsLogical = sourceIdHelper(_.supports.logical) _
val supportsGet = sourceIdHelper(_.supports.get) _
val supportsPutFull = sourceIdHelper(_.supports.putFull) _
val supportsPutPartial = sourceIdHelper(_.supports.putPartial) _
val supportsHint = sourceIdHelper(_.supports.hint) _
// TODO: Merge sourceIdHelper2 with sourceIdHelper
private def sourceIdHelper2(
member: TLMasterParameters => TransferSizes,
sourceId: UInt,
lgSize: UInt): Bool = {
// Because sourceIds are uniquely owned by each master, we use them to group the
// cases that have to be checked.
val emitCases = groupByIntoSeq(masters)(m => member(m)).map { case (k, vs) =>
k -> vs.map(_.sourceId)
}
emitCases.map { case (s, a) =>
(s.containsLg(lgSize)) &&
a.map(_.contains(sourceId)).reduce(_||_)
}.foldLeft(false.B)(_||_)
}
// Check for emit of a given operation at a specific id
def emitsAcquireT (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireT, sourceId, lgSize)
def emitsAcquireB (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireB, sourceId, lgSize)
def emitsArithmetic(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.arithmetic, sourceId, lgSize)
def emitsLogical (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.logical, sourceId, lgSize)
def emitsGet (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.get, sourceId, lgSize)
def emitsPutFull (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putFull, sourceId, lgSize)
def emitsPutPartial(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putPartial, sourceId, lgSize)
def emitsHint (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.hint, sourceId, lgSize)
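// Usage sketch (editor's illustration, not original source): these id-based checks mirror the
// address-based ones on the slave port, but key on sourceId because each master owns a disjoint
// sourceId range. For example, for a D-channel response `d`:
//   val couldBeGet = emitsGet(d.source, d.size)
// is true iff the master that owns d.source can emit a Get of 2^d.size bytes.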
def infoString = masters.map(_.infoString).mkString
def v1copy(
clients: Seq[TLMasterParameters] = masters,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
new TLMasterPortParameters(
masters = clients,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
def v2copy(
masters: Seq[TLMasterParameters] = masters,
channelBytes: TLChannelBeatBytes = channelBytes,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
new TLMasterPortParameters(
masters = masters,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
@deprecated("Use v1copy instead of copy","")
def copy(
clients: Seq[TLMasterParameters] = masters,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
v1copy(
clients,
minLatency,
echoFields,
requestFields,
responseKeys)
}
}
object TLClientPortParameters {
@deprecated("Use TLMasterPortParameters.v1 instead of TLClientPortParameters","")
def apply(
clients: Seq[TLMasterParameters],
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
TLMasterPortParameters.v1(
clients,
minLatency,
echoFields,
requestFields,
responseKeys)
}
}
object TLMasterPortParameters {
def v1(
clients: Seq[TLMasterParameters],
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
new TLMasterPortParameters(
masters = clients,
channelBytes = TLChannelBeatBytes(),
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
def v2(
masters: Seq[TLMasterParameters],
channelBytes: TLChannelBeatBytes = TLChannelBeatBytes(),
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
new TLMasterPortParameters(
masters = masters,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
}
case class TLBundleParameters(
addressBits: Int,
dataBits: Int,
sourceBits: Int,
sinkBits: Int,
sizeBits: Int,
echoFields: Seq[BundleFieldBase],
requestFields: Seq[BundleFieldBase],
responseFields: Seq[BundleFieldBase],
hasBCE: Boolean)
{
// Chisel has issues with 0-width wires
require (addressBits >= 1)
require (dataBits >= 8)
require (sourceBits >= 1)
require (sinkBits >= 1)
require (sizeBits >= 1)
require (isPow2(dataBits))
echoFields.foreach { f => require (f.key.isControl, s"${f} is not a legal echo field") }
val addrLoBits = log2Up(dataBits/8)
// Used to uniquify bus IP names
def shortName = s"a${addressBits}d${dataBits}s${sourceBits}k${sinkBits}z${sizeBits}" + (if (hasBCE) "c" else "u")
def union(x: TLBundleParameters) =
TLBundleParameters(
max(addressBits, x.addressBits),
max(dataBits, x.dataBits),
max(sourceBits, x.sourceBits),
max(sinkBits, x.sinkBits),
max(sizeBits, x.sizeBits),
echoFields = BundleField.union(echoFields ++ x.echoFields),
requestFields = BundleField.union(requestFields ++ x.requestFields),
responseFields = BundleField.union(responseFields ++ x.responseFields),
hasBCE || x.hasBCE)
}
object TLBundleParameters
{
val emptyBundleParams = TLBundleParameters(
addressBits = 1,
dataBits = 8,
sourceBits = 1,
sinkBits = 1,
sizeBits = 1,
echoFields = Nil,
requestFields = Nil,
responseFields = Nil,
hasBCE = false)
def union(x: Seq[TLBundleParameters]) = x.foldLeft(emptyBundleParams)((x,y) => x.union(y))
def apply(master: TLMasterPortParameters, slave: TLSlavePortParameters) =
new TLBundleParameters(
addressBits = log2Up(slave.maxAddress + 1),
dataBits = slave.beatBytes * 8,
sourceBits = log2Up(master.endSourceId),
sinkBits = log2Up(slave.endSinkId),
sizeBits = log2Up(log2Ceil(max(master.maxTransfer, slave.maxTransfer))+1),
echoFields = master.echoFields,
requestFields = BundleField.accept(master.requestFields, slave.requestKeys),
responseFields = BundleField.accept(slave.responseFields, master.responseKeys),
hasBCE = master.anySupportProbe && slave.anySupportAcquireB)
}
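// Worked example (editor's illustration, not original source): for a slave port with
// maxAddress = 0xFFFF and beatBytes = 8, and a master port with endSourceId = 4,
// TLBundleParameters(master, slave) yields addressBits = 16, dataBits = 64 and sourceBits = 2;
// hasBCE (and thus the B, C and E channels) is only generated when some master supports Probe
// *and* some slave supports AcquireB.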
case class TLEdgeParameters(
master: TLMasterPortParameters,
slave: TLSlavePortParameters,
params: Parameters,
sourceInfo: SourceInfo) extends FormatEdge
{
// legacy names:
def manager = slave
def client = master
val maxTransfer = max(master.maxTransfer, slave.maxTransfer)
val maxLgSize = log2Ceil(maxTransfer)
// Sanity check the link...
require (maxTransfer >= slave.beatBytes, s"Link's max transfer (${maxTransfer}) < ${slave.slaves.map(_.name)}'s beatBytes (${slave.beatBytes})")
def diplomaticClaimsMasterToSlave = master.anyEmitClaims.intersect(slave.anySupportClaims)
val bundle = TLBundleParameters(master, slave)
def formatEdge = master.infoString + "\n" + slave.infoString
}
case class TLCreditedDelay(
a: CreditedDelay,
b: CreditedDelay,
c: CreditedDelay,
d: CreditedDelay,
e: CreditedDelay)
{
def + (that: TLCreditedDelay): TLCreditedDelay = TLCreditedDelay(
a = a + that.a,
b = b + that.b,
c = c + that.c,
d = d + that.d,
e = e + that.e)
override def toString = s"(${a}, ${b}, ${c}, ${d}, ${e})"
}
object TLCreditedDelay {
def apply(delay: CreditedDelay): TLCreditedDelay = apply(delay, delay.flip, delay, delay.flip, delay)
}
case class TLCreditedManagerPortParameters(delay: TLCreditedDelay, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLCreditedClientPortParameters(delay: TLCreditedDelay, base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLCreditedEdgeParameters(client: TLCreditedClientPortParameters, manager: TLCreditedManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val delay = client.delay + manager.delay
val bundle = TLBundleParameters(client.base, manager.base)
def formatEdge = client.infoString + "\n" + manager.infoString
}
case class TLAsyncManagerPortParameters(async: AsyncQueueParams, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLAsyncClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLAsyncBundleParameters(async: AsyncQueueParams, base: TLBundleParameters)
case class TLAsyncEdgeParameters(client: TLAsyncClientPortParameters, manager: TLAsyncManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val bundle = TLAsyncBundleParameters(manager.async, TLBundleParameters(client.base, manager.base))
def formatEdge = client.infoString + "\n" + manager.infoString
}
case class TLRationalManagerPortParameters(direction: RationalDirection, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLRationalClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLRationalEdgeParameters(client: TLRationalClientPortParameters, manager: TLRationalManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val bundle = TLBundleParameters(client.base, manager.base)
def formatEdge = client.infoString + "\n" + manager.infoString
}
// To be unified, devices must agree on all of these terms
case class ManagerUnificationKey(
resources: Seq[Resource],
regionType: RegionType.T,
executable: Boolean,
supportsAcquireT: TransferSizes,
supportsAcquireB: TransferSizes,
supportsArithmetic: TransferSizes,
supportsLogical: TransferSizes,
supportsGet: TransferSizes,
supportsPutFull: TransferSizes,
supportsPutPartial: TransferSizes,
supportsHint: TransferSizes)
object ManagerUnificationKey
{
def apply(x: TLSlaveParameters): ManagerUnificationKey = ManagerUnificationKey(
resources = x.resources,
regionType = x.regionType,
executable = x.executable,
supportsAcquireT = x.supportsAcquireT,
supportsAcquireB = x.supportsAcquireB,
supportsArithmetic = x.supportsArithmetic,
supportsLogical = x.supportsLogical,
supportsGet = x.supportsGet,
supportsPutFull = x.supportsPutFull,
supportsPutPartial = x.supportsPutPartial,
supportsHint = x.supportsHint)
}
object ManagerUnification
{
def apply(slaves: Seq[TLSlaveParameters]): List[TLSlaveParameters] = {
slaves.groupBy(ManagerUnificationKey.apply).values.map { seq =>
val agree = seq.forall(_.fifoId == seq.head.fifoId)
seq(0).v1copy(
address = AddressSet.unify(seq.flatMap(_.address)),
fifoId = if (agree) seq(0).fifoId else None)
}.toList
}
}
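// Worked example (editor's illustration, not original source): two slaves that agree on every
// ManagerUnificationKey field but cover AddressSet(0x0000, 0xfff) and AddressSet(0x1000, 0xfff)
// respectively are merged into a single TLSlaveParameters whose address is AddressSet.unify of
// both, i.e. Seq(AddressSet(0x0000, 0x1fff)); their fifoId is preserved only if all merged slaves
// agree on it, otherwise it becomes None.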
case class TLBufferParams(
a: BufferParams = BufferParams.none,
b: BufferParams = BufferParams.none,
c: BufferParams = BufferParams.none,
d: BufferParams = BufferParams.none,
e: BufferParams = BufferParams.none
) extends DirectedBuffers[TLBufferParams] {
def copyIn(x: BufferParams) = this.copy(b = x, d = x)
def copyOut(x: BufferParams) = this.copy(a = x, c = x, e = x)
def copyInOut(x: BufferParams) = this.copyIn(x).copyOut(x)
}
/** Pretty printing of TL source id maps */
class TLSourceIdMap(tl: TLMasterPortParameters) extends IdMap[TLSourceIdMapEntry] {
private val tlDigits = String.valueOf(tl.endSourceId-1).length()
protected val fmt = s"\t[%${tlDigits}d, %${tlDigits}d) %s%s%s"
private val sorted = tl.masters.sortBy(_.sourceId)
val mapping: Seq[TLSourceIdMapEntry] = sorted.map { case c =>
TLSourceIdMapEntry(c.sourceId, c.name, c.supports.probe, c.requestFifo)
}
}
case class TLSourceIdMapEntry(tlId: IdRange, name: String, isCache: Boolean, requestFifo: Boolean)
extends IdMapEntry
{
val from = tlId
val to = tlId
val maxTransactionsInFlight = Some(tlId.size)
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
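// Worked example (editor's illustration, not original source): for lgSize = 3 the generated mask
// is 0b111, so isAligned(0x1000.U, 3.U) is true while isAligned(0x1004.U, 3.U) is false, because
// an 8-byte access must have its low three address bits clear.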
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
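// Worked example (editor's illustration, not original source): with beatBytes = 8, a PutFullData
// of size = 5 (32 bytes) gives numBeats1 = (2^5 - 1) >> 3 = 3, i.e. 4 beats, while a Get of any
// size carries no data on the A channel and gives numBeats1 = 0 (a single beat).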
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
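// A minimal usage sketch (the names `edge` and `tl` are hypothetical, not
// defined in this file): tracking message boundaries on a decoupled channel.
//   val (d_first, d_last, d_done) = edge.firstlast(tl.d)
//   when (d_done) { /* a complete D-channel message was just accepted */ }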
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
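// A hedged usage sketch (identifiers are hypothetical): the running count can
// gate quiescence checks, at the cost of the circuit noted above.
//   val (in_flight, _) = edge.inFlight(tlBundle)
//   val quiescent = in_flight === 0.U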
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
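// The constructors above return either bare bits (ProbeAck, GrantAck) or a
// (legal, bits) pair. A typical hookup, as a sketch only with hypothetical
// names (`tl`, `srcId`, `addr`, `lgSize`, `wantAcquire`):
//   val (legal, acquire) = edge.AcquireBlock(srcId, addr, lgSize, TLPermissions.NtoT)
//   tl.a.valid := wantAcquire && legal
//   tl.a.bits  := acquire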
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
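// The two Put families differ in masking: the overloads without a mask emit
// PutFullData with a mask derived from (address, size), while the overloads
// taking a mask emit PutPartialData with the caller-supplied mask.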
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
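// Transpose of a possibly ragged Seq[Seq[T]]: inner Seqs that run out are
// dropped each round rather than requiring equal lengths.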
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
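// Manager-side sketch (identifiers such as `tl`, `edgeIn`, `sinkId`, `a_source`,
// `a_size`, `perms`, and `rdata` are hypothetical):
//   tl.d.bits := edgeIn.Grant(fromSink = sinkId, toSource = a_source,
//                             lgSize = a_size, capPermissions = perms, data = rdata)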
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
module TLMonitor_41( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [6:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [31:0] io_in_d_bits_data // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [6:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [31:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [31:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_source = 1'h0; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_source = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire mask_sizeOH_shiftAmount = 1'h0; // @[OneHot.scala:64:49]
wire mask_sub_size = 1'h0; // @[Misc.scala:209:26]
wire _mask_sub_acc_T = 1'h0; // @[Misc.scala:215:38]
wire _mask_sub_acc_T_1 = 1'h0; // @[Misc.scala:215:38]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire a_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire a_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire a_first_count = 1'h0; // @[Edges.scala:234:25]
wire d_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count = 1'h0; // @[Edges.scala:234:25]
wire a_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59]
wire a_first_beats1_1 = 1'h0; // @[Edges.scala:221:14]
wire a_first_count_1 = 1'h0; // @[Edges.scala:234:25]
wire d_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1_1 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count_1 = 1'h0; // @[Edges.scala:234:25]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire c_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_first_count_T = 1'h0; // @[Edges.scala:234:27]
wire c_first_count = 1'h0; // @[Edges.scala:234:25]
wire _c_first_counter_T = 1'h0; // @[Edges.scala:236:21]
wire d_first_beats1_decode_2 = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1_2 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count_2 = 1'h0; // @[Edges.scala:234:25]
wire c_set = 1'h0; // @[Monitor.scala:738:34]
wire c_set_wo_ready = 1'h0; // @[Monitor.scala:739:34]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire _source_ok_T = 1'h1; // @[Parameters.scala:46:9]
wire _source_ok_WIRE_0 = 1'h1; // @[Parameters.scala:1138:31]
wire mask_sub_sub_0_1 = 1'h1; // @[Misc.scala:206:21]
wire mask_sub_0_1 = 1'h1; // @[Misc.scala:215:29]
wire mask_sub_1_1 = 1'h1; // @[Misc.scala:215:29]
wire mask_size = 1'h1; // @[Misc.scala:209:26]
wire mask_acc = 1'h1; // @[Misc.scala:215:29]
wire mask_acc_1 = 1'h1; // @[Misc.scala:215:29]
wire mask_acc_2 = 1'h1; // @[Misc.scala:215:29]
wire mask_acc_3 = 1'h1; // @[Misc.scala:215:29]
wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:46:9]
wire _source_ok_WIRE_1_0 = 1'h1; // @[Parameters.scala:1138:31]
wire _a_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire a_first_last = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last = 1'h1; // @[Edges.scala:232:33]
wire _a_first_last_T_3 = 1'h1; // @[Edges.scala:232:43]
wire a_first_last_1 = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_3 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last_1 = 1'h1; // @[Edges.scala:232:33]
wire _same_cycle_resp_T_2 = 1'h1; // @[Monitor.scala:684:113]
wire c_first_counter1 = 1'h1; // @[Edges.scala:230:28]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_5 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last_2 = 1'h1; // @[Edges.scala:232:33]
wire _same_cycle_resp_T_8 = 1'h1; // @[Monitor.scala:795:113]
wire [1:0] is_aligned_mask = 2'h3; // @[package.scala:243:46]
wire [1:0] mask_lo = 2'h3; // @[Misc.scala:222:10]
wire [1:0] mask_hi = 2'h3; // @[Misc.scala:222:10]
wire [1:0] _a_first_beats1_decode_T_2 = 2'h3; // @[package.scala:243:46]
wire [1:0] _d_first_beats1_decode_T_2 = 2'h3; // @[package.scala:243:46]
wire [1:0] _a_first_beats1_decode_T_5 = 2'h3; // @[package.scala:243:46]
wire [1:0] _d_first_beats1_decode_T_5 = 2'h3; // @[package.scala:243:46]
wire [1:0] _c_first_beats1_decode_T_1 = 2'h3; // @[package.scala:243:76]
wire [1:0] _c_first_counter1_T = 2'h3; // @[Edges.scala:230:28]
wire [1:0] _d_first_beats1_decode_T_8 = 2'h3; // @[package.scala:243:46]
wire [1:0] io_in_a_bits_size = 2'h2; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_size = 2'h2; // @[Monitor.scala:36:7]
wire [1:0] _mask_sizeOH_T = 2'h2; // @[Misc.scala:202:34]
wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7]
wire [1:0] _is_aligned_mask_T_1 = 2'h0; // @[package.scala:243:76]
wire [1:0] _a_first_beats1_decode_T_1 = 2'h0; // @[package.scala:243:76]
wire [1:0] _d_first_beats1_decode_T_1 = 2'h0; // @[package.scala:243:76]
wire [1:0] _a_first_beats1_decode_T_4 = 2'h0; // @[package.scala:243:76]
wire [1:0] _d_first_beats1_decode_T_4 = 2'h0; // @[package.scala:243:76]
wire [1:0] _c_first_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_first_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_first_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_first_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_first_beats1_decode_T_2 = 2'h0; // @[package.scala:243:46]
wire [1:0] _d_first_beats1_decode_T_7 = 2'h0; // @[package.scala:243:76]
wire [1:0] _c_set_wo_ready_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_set_wo_ready_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_opcodes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_opcodes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_sizes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_sizes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_opcodes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_opcodes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_sizes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_sizes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_probe_ack_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_probe_ack_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_probe_ack_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_probe_ack_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_4_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_5_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [2:0] io_in_a_bits_param = 3'h0; // @[Monitor.scala:36:7]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_sizes_set_interm = 3'h0; // @[Monitor.scala:755:40]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_T = 3'h0; // @[Monitor.scala:766:51]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [3:0] io_in_a_bits_mask = 4'hF; // @[Monitor.scala:36:7]
wire [3:0] mask = 4'hF; // @[Misc.scala:222:10]
wire [31:0] _c_first_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_first_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_first_WIRE_2_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_first_WIRE_3_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_set_wo_ready_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_set_wo_ready_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_set_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_set_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_opcodes_set_interm_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_opcodes_set_interm_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_sizes_set_interm_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_sizes_set_interm_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_opcodes_set_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_opcodes_set_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_sizes_set_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_sizes_set_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_probe_ack_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_probe_ack_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_probe_ack_WIRE_2_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_probe_ack_WIRE_3_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_2_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_3_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_4_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_5_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_bits_address = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_1_bits_address = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_2_bits_address = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_3_bits_address = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_wo_ready_WIRE_bits_address = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_wo_ready_WIRE_1_bits_address = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_WIRE_bits_address = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_WIRE_1_bits_address = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_interm_WIRE_bits_address = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_interm_WIRE_1_bits_address = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_interm_WIRE_bits_address = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_interm_WIRE_1_bits_address = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_WIRE_bits_address = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_WIRE_1_bits_address = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_WIRE_bits_address = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_WIRE_1_bits_address = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_bits_address = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_1_bits_address = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_2_bits_address = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_3_bits_address = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_bits_address = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_1_bits_address = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_2_bits_address = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_3_bits_address = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_4_bits_address = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_5_bits_address = 7'h0; // @[Bundles.scala:265:61]
wire [30:0] _d_opcodes_clr_T_5 = 31'hF; // @[Monitor.scala:680:76]
wire [30:0] _d_sizes_clr_T_5 = 31'hF; // @[Monitor.scala:681:74]
wire [30:0] _d_opcodes_clr_T_11 = 31'hF; // @[Monitor.scala:790:76]
wire [30:0] _d_sizes_clr_T_11 = 31'hF; // @[Monitor.scala:791:74]
wire [3:0] _a_opcode_lookup_T = 4'h0; // @[Monitor.scala:637:69]
wire [3:0] _a_size_lookup_T = 4'h0; // @[Monitor.scala:641:65]
wire [3:0] _a_opcodes_set_T = 4'h0; // @[Monitor.scala:659:79]
wire [3:0] _a_sizes_set_T = 4'h0; // @[Monitor.scala:660:77]
wire [3:0] _d_opcodes_clr_T_4 = 4'h0; // @[Monitor.scala:680:101]
wire [3:0] _d_sizes_clr_T_4 = 4'h0; // @[Monitor.scala:681:99]
wire [3:0] c_opcodes_set = 4'h0; // @[Monitor.scala:740:34]
wire [3:0] c_sizes_set = 4'h0; // @[Monitor.scala:741:34]
wire [3:0] _c_opcode_lookup_T = 4'h0; // @[Monitor.scala:749:69]
wire [3:0] _c_size_lookup_T = 4'h0; // @[Monitor.scala:750:67]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_opcodes_set_T = 4'h0; // @[Monitor.scala:767:79]
wire [3:0] _c_sizes_set_T = 4'h0; // @[Monitor.scala:768:77]
wire [3:0] _d_opcodes_clr_T_10 = 4'h0; // @[Monitor.scala:790:101]
wire [3:0] _d_sizes_clr_T_10 = 4'h0; // @[Monitor.scala:791:99]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [1:0] _mask_sizeOH_T_1 = 2'h1; // @[OneHot.scala:65:12]
wire [1:0] _mask_sizeOH_T_2 = 2'h1; // @[OneHot.scala:65:27]
wire [1:0] mask_sizeOH = 2'h1; // @[Misc.scala:202:81]
wire [1:0] _a_set_wo_ready_T = 2'h1; // @[OneHot.scala:58:35]
wire [1:0] _a_set_T = 2'h1; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_wo_ready_T = 2'h1; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_T = 2'h1; // @[OneHot.scala:58:35]
wire [1:0] _c_set_wo_ready_T = 2'h1; // @[OneHot.scala:58:35]
wire [1:0] _c_set_T = 2'h1; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_wo_ready_T_1 = 2'h1; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_T_1 = 2'h1; // @[OneHot.scala:58:35]
wire [17:0] _c_sizes_set_T_1 = 18'h0; // @[Monitor.scala:768:52]
wire [18:0] _c_opcodes_set_T_1 = 19'h0; // @[Monitor.scala:767:54]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] _c_sizes_set_interm_T_1 = 3'h1; // @[Monitor.scala:766:59]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [4:0] _is_aligned_mask_T = 5'hC; // @[package.scala:243:71]
wire [4:0] _a_first_beats1_decode_T = 5'hC; // @[package.scala:243:71]
wire [4:0] _d_first_beats1_decode_T = 5'hC; // @[package.scala:243:71]
wire [4:0] _a_first_beats1_decode_T_3 = 5'hC; // @[package.scala:243:71]
wire [4:0] _d_first_beats1_decode_T_3 = 5'hC; // @[package.scala:243:71]
wire [4:0] _d_first_beats1_decode_T_6 = 5'hC; // @[package.scala:243:71]
wire [4:0] _c_first_beats1_decode_T = 5'h3; // @[package.scala:243:71]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] _a_sizes_set_interm_T_1 = 3'h5; // @[Monitor.scala:658:59]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] _a_sizes_set_interm_T = 3'h4; // @[Monitor.scala:658:51]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [6:0] _is_aligned_T = {5'h0, io_in_a_bits_address_0[1:0]}; // @[Monitor.scala:36:7]
wire is_aligned = _is_aligned_T == 7'h0; // @[Edges.scala:21:{16,24}]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_1_2 = mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_eq; // @[Misc.scala:214:27, :215:38]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_eq_1; // @[Misc.scala:214:27, :215:38]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_eq_2; // @[Misc.scala:214:27, :215:38]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_eq_3; // @[Misc.scala:214:27, :215:38]
wire _T_658 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_658; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_658; // @[Decoupled.scala:51:35]
wire a_first_done = _a_first_T; // @[Decoupled.scala:51:35]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
reg a_first_counter; // @[Edges.scala:229:27]
wire _a_first_last_T = a_first_counter; // @[Edges.scala:229:27, :232:25]
wire [1:0] _a_first_counter1_T = {1'h0, a_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire a_first_counter1 = _a_first_counter1_T[0]; // @[Edges.scala:230:28]
wire a_first = ~a_first_counter; // @[Edges.scala:229:27, :231:25]
wire _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire _a_first_counter_T = ~a_first & a_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21]
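// a_first_beats1 is tied to zero for this edge (see the wire declarations
// above), so every A-channel message is a single beat and the one-bit counter
// above exists only to satisfy the generic first/last bookkeeping.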
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [6:0] address; // @[Monitor.scala:391:22]
wire _T_726 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_726; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_726; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_726; // @[Decoupled.scala:51:35]
wire d_first_done = _d_first_T; // @[Decoupled.scala:51:35]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
reg d_first_counter; // @[Edges.scala:229:27]
wire _d_first_last_T = d_first_counter; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T = {1'h0, d_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1 = _d_first_counter1_T[0]; // @[Edges.scala:230:28]
wire d_first = ~d_first_counter; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T = ~d_first & d_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] inflight; // @[Monitor.scala:614:27]
reg [3:0] inflight_opcodes; // @[Monitor.scala:616:35]
wire [3:0] _a_opcode_lookup_T_1 = inflight_opcodes; // @[Monitor.scala:616:35, :637:44]
reg [3:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [3:0] _a_size_lookup_T_1 = inflight_sizes; // @[Monitor.scala:618:33, :641:40]
wire a_first_done_1 = _a_first_T_1; // @[Decoupled.scala:51:35]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
reg a_first_counter_1; // @[Edges.scala:229:27]
wire _a_first_last_T_2 = a_first_counter_1; // @[Edges.scala:229:27, :232:25]
wire [1:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire a_first_counter1_1 = _a_first_counter1_T_1[0]; // @[Edges.scala:230:28]
wire a_first_1 = ~a_first_counter_1; // @[Edges.scala:229:27, :231:25]
wire _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire _a_first_counter_T_1 = ~a_first_1 & a_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21]
wire d_first_done_1 = _d_first_T_1; // @[Decoupled.scala:51:35]
reg d_first_counter_1; // @[Edges.scala:229:27]
wire _d_first_last_T_2 = d_first_counter_1; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1_1 = _d_first_counter1_T_1[0]; // @[Edges.scala:230:28]
wire d_first_1 = ~d_first_counter_1; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T_1 = ~d_first_1 & d_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21]
wire a_set; // @[Monitor.scala:626:34]
wire a_set_wo_ready; // @[Monitor.scala:627:34]
wire [3:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [3:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [15:0] _a_opcode_lookup_T_6 = {12'h0, _a_opcode_lookup_T_1}; // @[Monitor.scala:637:{44,97}]
wire [15:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [15:0] _a_size_lookup_T_6 = {12'h0, _a_size_lookup_T_1}; // @[Monitor.scala:637:97, :641:{40,91}]
wire [15:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[15:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [2:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _T_588 = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26]
assign a_set_wo_ready = _T_588; // @[Monitor.scala:627:34, :651:26]
wire _same_cycle_resp_T; // @[Monitor.scala:684:44]
assign _same_cycle_resp_T = _T_588; // @[Monitor.scala:651:26, :684:44]
assign a_set = _T_658 & a_first_1; // @[Decoupled.scala:51:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = a_set ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:626:34, :646:40, :655:70, :657:{28,61}]
assign a_sizes_set_interm = a_set ? 3'h5 : 3'h0; // @[Monitor.scala:626:34, :648:38, :655:70, :658:28]
wire [18:0] _a_opcodes_set_T_1 = {15'h0, a_opcodes_set_interm}; // @[Monitor.scala:646:40, :659:54]
assign a_opcodes_set = a_set ? _a_opcodes_set_T_1[3:0] : 4'h0; // @[Monitor.scala:626:34, :630:33, :655:70, :659:{28,54}]
wire [17:0] _a_sizes_set_T_1 = {15'h0, a_sizes_set_interm}; // @[Monitor.scala:648:38, :659:54, :660:52]
assign a_sizes_set = a_set ? _a_sizes_set_T_1[3:0] : 4'h0; // @[Monitor.scala:626:34, :632:31, :655:70, :660:{28,52}]
wire d_clr; // @[Monitor.scala:664:34]
wire d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [3:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [3:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN; // @[Monitor.scala:673:46, :783:46]
wire _T_637 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
assign d_clr_wo_ready = _T_637 & ~d_release_ack; // @[Monitor.scala:665:34, :673:46, :674:{26,71,74}]
assign d_clr = _T_726 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
wire [3:0] _GEN_0 = {4{d_clr}}; // @[Monitor.scala:664:34, :668:33, :678:89, :680:21]
assign d_opcodes_clr = _GEN_0; // @[Monitor.scala:668:33, :678:89, :680:21]
assign d_sizes_clr = _GEN_0; // @[Monitor.scala:668:33, :670:31, :678:89, :680:21]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire same_cycle_resp = _same_cycle_resp_T_1; // @[Monitor.scala:684:{55,88}]
wire [1:0] _inflight_T = {inflight[1], inflight[0] | a_set}; // @[Monitor.scala:614:27, :626:34, :705:27]
wire _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [1:0] _inflight_T_2 = {1'h0, _inflight_T[0] & _inflight_T_1}; // @[Monitor.scala:705:{27,36,38}]
wire [3:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [3:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [3:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [3:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [3:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [3:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [1:0] inflight_1; // @[Monitor.scala:726:35]
wire [1:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [3:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [3:0] _c_opcode_lookup_T_1 = inflight_opcodes_1; // @[Monitor.scala:727:35, :749:44]
wire [3:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [3:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [3:0] _c_size_lookup_T_1 = inflight_sizes_1; // @[Monitor.scala:728:35, :750:42]
wire [3:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire d_first_done_2 = _d_first_T_2; // @[Decoupled.scala:51:35]
reg d_first_counter_2; // @[Edges.scala:229:27]
wire _d_first_last_T_4 = d_first_counter_2; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1_2 = _d_first_counter1_T_2[0]; // @[Edges.scala:230:28]
wire d_first_2 = ~d_first_counter_2; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T_2 = ~d_first_2 & d_first_counter1_2; // @[Edges.scala:230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [15:0] _c_opcode_lookup_T_6 = {12'h0, _c_opcode_lookup_T_1}; // @[Monitor.scala:637:97, :749:{44,97}]
wire [15:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [15:0] _c_size_lookup_T_6 = {12'h0, _c_size_lookup_T_1}; // @[Monitor.scala:637:97, :750:{42,93}]
wire [15:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[15:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire d_clr_1; // @[Monitor.scala:774:34]
wire d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [3:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [3:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_702 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_702 & d_release_ack_1; // @[Monitor.scala:775:34, :783:46, :784:{26,71}]
assign d_clr_1 = _T_726 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
wire [3:0] _GEN_1 = {4{d_clr_1}}; // @[Monitor.scala:774:34, :776:34, :788:88, :790:21]
assign d_opcodes_clr_1 = _GEN_1; // @[Monitor.scala:776:34, :788:88, :790:21]
assign d_sizes_clr_1 = _GEN_1; // @[Monitor.scala:776:34, :777:34, :788:88, :790:21]
wire _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [1:0] _inflight_T_5 = {1'h0, _inflight_T_3[0] & _inflight_T_4}; // @[Monitor.scala:814:{35,44,46}]
wire [3:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [3:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [3:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [3:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
    //The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
    monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
      monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
  //This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
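    // Worked example (illustrative, not in the original source): size_to_numfullbits(3.U)
    // yields 7.U (0b111), i.e. a mask of three consecutive set bits, used below to carve one
    // per-source slot out of the packed inflight_opcodes / inflight_sizes registers.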
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
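    // Worked example (illustrative): a request of size 3 is recorded in its source's slot as
    // (3 << 1) | 1 = 7; the lookups above shift that slot back down and the trailing ">> 1"
    // drops the low "occupied" bit, recovering the original size (and likewise the opcode).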
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
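    // These vectors are indexed by the A-channel opcode; e.g. Get (opcode 4) must be answered
    // by AccessAckData, while AcquireBlock (opcode 6) may legally see either Grant or
    // GrantData, hence the second-option vector.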
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
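// Illustrative usage (the signal names below are hypothetical, not from this file):
//   val helper = DecoupledHelper(in.valid, out.ready, !stall)
//   in.ready  := helper.fire(in.valid)   // AND of all terms except the excluded one
//   out.valid := helper.fire(out.ready)
//   when (helper.fire()) { /* all three conditions hold this cycle */ }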
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
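// Illustrative example (hypothetical key/value constants): select a (base, mask) pair by
// opcode, falling back to the default when no key matches.
//   val (base, mask) = MuxTLookup(opcode, (0.U, 0.U),
//     Seq(1.U -> (0x100.U, 0xff.U),
//         2.U -> (0x200.U, 0x0f.U)))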
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
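// Note: MuxCase gives priority to earlier arguments, so ValidMux(a, b).bits is a.bits when
// a.valid and b.bits otherwise, and the result is valid when either input is valid.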
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
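// Worked example (illustrative): Str("ok") packs the ASCII bytes of 'o' and 'k' into the
// 16-bit literal 0x6f6b.U, while Str(x: UInt) renders x as decimal characters for printf.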
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
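// Illustrative example (not part of the original file): Split("b11010".U, 3) returns
// the pair ("b11".U, "b010".U); the multi-cut overloads peel off further low-order fields.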
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
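// Illustrative sketch (not part of the original file): a common use is picking a
// pseudo-random replacement way. For a non-power-of-2 modulus the helper biases an LFSR
// draw through the partition/PriorityEncoder logic above. The module name is hypothetical.
class RandomWayExample extends Module {
  val io = IO(new Bundle { val way = Output(UInt(2.W)) })
  io.way := Random(3) // roughly uniform selection over {0, 1, 2}
}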
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
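// Illustrative sketch (not part of the original file): a 3-way voter over redundant
// copies of a control bit. The signal names are hypothetical.
class MajorityVoteExample extends Module {
  val io = IO(new Bundle {
    val votes = Input(Vec(3, Bool()))
    val out = Output(Bool())
  })
  io.out := Majority(io.votes) // true when at least two of the three bits are set
}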
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
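// Illustrative example (not part of the original file): PopCountAtLeast("b0110".U, 2)
// is true.B; for thresholds of 0-2 the divide-and-conquer helper above avoids building
// a full PopCount adder tree.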
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
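// Illustrative sketch (not part of the original file): byte-lane mask generation for an
// 8-byte beat, matching the worked examples in the comment above MaskGen. The module
// and port names are hypothetical.
class MaskGenExample extends Module {
  val io = IO(new Bundle {
    val addrLo = Input(UInt(3.W)) // address bits within the beat
    val lgSize = Input(UInt(2.W)) // log2 of the access size in bytes
    val mask = Output(UInt(8.W))
  })
  io.mask := MaskGen(io.addrLo, io.lgSize, beatBytes = 8)
}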
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
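// Illustrative sketch (not part of the original file): read a runtime plusarg and use
// the timeout helper to kill a simulation after a configurable cycle count. The plusarg
// name "max_cycles" is hypothetical.
class PlusArgExample extends Module {
  val cycles = RegInit(0.U(32.W))
  cycles := cycles + 1.U
  PlusArg.timeout("max_cycles", default = 0, docstring = "kill the sim after this many cycles")(cycles)
}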
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
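// Worked examples for the helpers above (illustrative, not part of the original file):
//   "b101".U(3.W).rotateLeft(2)  // "b110".U
//   6.U.addWrap(5.U, 8)          // 3.U  ((6 + 5) % 8; power-of-2 modulus just truncates)
//   6.U.subWrap(7.U, 10)         // 9.U  ((6 - 7) % 10)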
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
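// Illustrative example (not part of the original file): for a one-hot input,
// leftOR("b00100".U) yields "b11100".U and rightOR("b00100".U) yields "b00111".U,
// i.e. "this bit and everything above/below it".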
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
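// Illustrative example (not part of the original file):
//   groupByIntoSeq(Seq(1, 2, 3, 4, 5))(_ % 2) == Seq(1 -> Seq(1, 3, 5), 0 -> Seq(2, 4))
// Keys appear in first-encounter order, so generated hardware stays deterministic across
// runs, unlike Seq.groupBy which returns an unordered Map.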
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on an inwards path to) the global point of serialization.
* (B)ranch: the agent is on an outwards path from the point of serialization and holds read-only permissions.
* (N)one: the agent holds no read or write permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A half-open id range [start, end); empty when start == end
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
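// Illustrative example (not part of the original file): IdRange(4, 8) covers source ids
// 4 through 7; for a UInt the contains check above compares the shared prefix (the bits
// above the largest differing bit) and range-checks only the low bits.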
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
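// Illustrative examples (not part of the original file):
//   TransferSizes(4, 64).contains(16)                        // true
//   TransferSizes(4, 64).intersect(TransferSizes(32, 256))   // TransferSizes(32, 64)
//   TransferSizes(1, 4).mincover(TransferSizes(16, 64))      // TransferSizes(1, 64), which also
//     admits size 8 even though neither operand supports it (see the note above)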
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
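// Illustrative examples (not part of the original file):
//   AddressSet(0x200, 0xff).contains(0x2a0)                  // true: 0x2a0 lies in 0x200-0x2ff
//   AddressSet(0x200, 0xff).widen(0x1000)                    // AddressSet(0x200, 0x10ff): also matches 0x1200-0x12ff
//   AddressSet(0x0, 0x3f).intersect(AddressSet(0x20, 0x1f))  // Some(AddressSet(0x20, 0x1f))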
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
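// Illustrative example (not part of the original file): BufferParams.default(x) wraps a
// DecoupledIO in a 2-deep Queue, BufferParams.none passes it through untouched, and
// flow/pipe are 1-deep queues with a combinational bypass: flow allows a zero-latency
// passthrough when empty, pipe allows back-to-back refill when full and draining.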
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
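// Illustrative usage sketch (not part of the original file; signal names are
// hypothetical): track multibeat messages and act only once per burst.
//   val (first, last, done) = edge.firstlast(tl.d)
//   when (done) { /* the final beat of a D-channel burst just fired */ }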
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
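// --- Editorial usage sketch (added; not part of the original file) ---
// The TLEdgeOut helpers above construct legal A/C/E-channel beats for a client.
// Assuming illustrative names `edgeOut: TLEdgeOut`, `out: TLBundle`, `sourceId`,
// `addr`, and `wantRead`, an uncached 8-byte read could be driven as:
//   val (legal, aGet) = edgeOut.Get(fromSource = sourceId, toAddress = addr, lgSize = 3.U)
//   out.a.valid := wantRead && legal
//   out.a.bits  := aGet
// The returned `legal` Bool indicates whether some visible manager supports the
// operation at that address and size (checked via supportsGetFast above).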
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
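// --- Editorial usage sketch (added; not part of the original file) ---
// The TLEdgeIn helpers above construct B/D-channel beats on the manager side.
// Assuming illustrative names `edgeIn: TLEdgeIn`, `in: TLBundle`, a captured
// A-beat `reqA`, read data `rdata`, and a `respValid` flag, a simple responder
// could acknowledge requests as:
//   in.d.valid := respValid
//   in.d.bits  := Mux(reqA.opcode === TLMessages.Get,
//                     edgeIn.AccessAck(reqA, rdata),  // AccessAckData with payload
//                     edgeIn.AccessAck(reqA))         // data-less AccessAck for Puts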
module TLMonitor_51( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [20:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [7:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14]
);
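  // Editorial note (added; not generated output): TLMonitor is an assertion-only
  // checker elaborated alongside the design. It tracks outstanding A-channel
  // requests per source ID and asserts TileLink invariants such as address
  // alignment, legal opcode/param/size/mask combinations, and that each D
  // response matches a pending request. Because this edge exposes only the A
  // and D channels, the C-channel bookkeeping below is tied off to constants.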
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [20:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [7:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_27 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_29 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_33 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_35 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_51 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_53 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_57 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_59 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_63 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_65 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_69 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_71 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_75 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_77 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_81 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_83 = 1'h1; // @[Parameters.scala:57:20]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_first_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_first_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_wo_ready_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_wo_ready_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_4_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_5_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_first_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_first_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_first_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_first_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_set_wo_ready_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_set_wo_ready_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_opcodes_set_interm_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_opcodes_set_interm_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_sizes_set_interm_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_sizes_set_interm_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_opcodes_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_opcodes_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_sizes_set_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_sizes_set_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_probe_ack_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_probe_ack_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _c_probe_ack_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _c_probe_ack_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _same_cycle_resp_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _same_cycle_resp_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _same_cycle_resp_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _same_cycle_resp_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [7:0] _same_cycle_resp_WIRE_4_bits_source = 8'h0; // @[Bundles.scala:265:74]
wire [7:0] _same_cycle_resp_WIRE_5_bits_source = 8'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [2050:0] _c_opcodes_set_T_1 = 2051'h0; // @[Monitor.scala:767:54]
wire [2050:0] _c_sizes_set_T_1 = 2051'h0; // @[Monitor.scala:768:52]
wire [10:0] _c_opcodes_set_T = 11'h0; // @[Monitor.scala:767:79]
wire [10:0] _c_sizes_set_T = 11'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [255:0] _c_set_wo_ready_T = 256'h1; // @[OneHot.scala:58:35]
wire [255:0] _c_set_T = 256'h1; // @[OneHot.scala:58:35]
wire [515:0] c_opcodes_set = 516'h0; // @[Monitor.scala:740:34]
wire [515:0] c_sizes_set = 516'h0; // @[Monitor.scala:741:34]
wire [128:0] c_set = 129'h0; // @[Monitor.scala:738:34]
wire [128:0] c_set_wo_ready = 129'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [7:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_44 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_48 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_8 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_9 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_10 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [7:0] _source_ok_uncommonBits_T_11 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
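  // Editorial note (added): the `_source_ok_*` chain below decodes
  // io_in_a_bits_source against the legal client source-ID ranges of this edge;
  // `source_ok` is asserted when the ID falls within some visible client. The
  // analogous check for io_in_d_bits_source follows further below.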
wire _source_ok_T = io_in_a_bits_source_0 == 8'h20; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[2:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[7:3]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[7:3]; // @[Monitor.scala:36:7]
wire _source_ok_T_2 = _source_ok_T_1 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[2:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_8 = _source_ok_T_7 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [5:0] _source_ok_T_13 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_19 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_25 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_31 = io_in_a_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_14 = _source_ok_T_13 == 6'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_20 = _source_ok_T_19 == 6'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_26 = _source_ok_T_25 == 6'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_28 = _source_ok_T_26; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_30 = _source_ok_T_28; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_5 = _source_ok_T_30; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_32 = _source_ok_T_31 == 6'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_34 = _source_ok_T_32; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_36 = _source_ok_T_34; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_6 = _source_ok_T_36; // @[Parameters.scala:1138:31]
wire _source_ok_T_37 = io_in_a_bits_source_0 == 8'h41; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_7 = _source_ok_T_37; // @[Parameters.scala:1138:31]
wire _source_ok_T_38 = io_in_a_bits_source_0 == 8'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_8 = _source_ok_T_38; // @[Parameters.scala:1138:31]
wire _source_ok_T_39 = io_in_a_bits_source_0 == 8'h80; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_9 = _source_ok_T_39; // @[Parameters.scala:1138:31]
wire _source_ok_T_40 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_41 = _source_ok_T_40 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_42 = _source_ok_T_41 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_43 = _source_ok_T_42 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_44 = _source_ok_T_43 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_45 = _source_ok_T_44 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_46 = _source_ok_T_45 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_47 = _source_ok_T_46 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_47 | _source_ok_WIRE_9; // @[Parameters.scala:1138:31, :1139:46]
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [20:0] _is_aligned_T = {15'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 21'h0; // @[Edges.scala:21:{16,24}]
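  // Editorial note (added): the `mask_*` network below (from Misc.scala's mask
  // generator) recomputes the byte-lane mask implied by the request address and
  // size, which the monitor compares against io_in_a_bits_mask.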
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [2:0] uncommonBits = _uncommonBits_T[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_1 = _uncommonBits_T_1[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_6 = _uncommonBits_T_6[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_7 = _uncommonBits_T_7[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_12 = _uncommonBits_T_12[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_13 = _uncommonBits_T_13[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_18 = _uncommonBits_T_18[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_19 = _uncommonBits_T_19[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_24 = _uncommonBits_T_24[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_25 = _uncommonBits_T_25[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_30 = _uncommonBits_T_30[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_31 = _uncommonBits_T_31[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_36 = _uncommonBits_T_36[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_37 = _uncommonBits_T_37[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_39 = _uncommonBits_T_39[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_42 = _uncommonBits_T_42[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_43 = _uncommonBits_T_43[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_44 = _uncommonBits_T_44[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_46 = _uncommonBits_T_46[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_47 = _uncommonBits_T_47[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_48 = _uncommonBits_T_48[2:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_49 = _uncommonBits_T_49[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_52 = _uncommonBits_T_52[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_53 = _uncommonBits_T_53[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_48 = io_in_d_bits_source_0 == 8'h20; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_48; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[2:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_49 = io_in_d_bits_source_0[7:3]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_55 = io_in_d_bits_source_0[7:3]; // @[Monitor.scala:36:7]
wire _source_ok_T_50 = _source_ok_T_49 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_52 = _source_ok_T_50; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_54 = _source_ok_T_52; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_54; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[2:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_56 = _source_ok_T_55 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_58 = _source_ok_T_56; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_60 = _source_ok_T_58; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_60; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [5:0] _source_ok_T_61 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_67 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_73 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire [5:0] _source_ok_T_79 = io_in_d_bits_source_0[7:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_62 = _source_ok_T_61 == 6'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_64 = _source_ok_T_62; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_66 = _source_ok_T_64; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_66; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_68 = _source_ok_T_67 == 6'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_70 = _source_ok_T_68; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_72 = _source_ok_T_70; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_4 = _source_ok_T_72; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_10 = _source_ok_uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_74 = _source_ok_T_73 == 6'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_76 = _source_ok_T_74; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_78 = _source_ok_T_76; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_5 = _source_ok_T_78; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_11 = _source_ok_uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_80 = _source_ok_T_79 == 6'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_82 = _source_ok_T_80; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_84 = _source_ok_T_82; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_6 = _source_ok_T_84; // @[Parameters.scala:1138:31]
wire _source_ok_T_85 = io_in_d_bits_source_0 == 8'h41; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_7 = _source_ok_T_85; // @[Parameters.scala:1138:31]
wire _source_ok_T_86 = io_in_d_bits_source_0 == 8'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_8 = _source_ok_T_86; // @[Parameters.scala:1138:31]
wire _source_ok_T_87 = io_in_d_bits_source_0 == 8'h80; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_9 = _source_ok_T_87; // @[Parameters.scala:1138:31]
wire _source_ok_T_88 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_89 = _source_ok_T_88 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_90 = _source_ok_T_89 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_91 = _source_ok_T_90 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_92 = _source_ok_T_91 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_93 = _source_ok_T_92 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_94 = _source_ok_T_93 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_95 = _source_ok_T_94 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_95 | _source_ok_WIRE_1_9; // @[Parameters.scala:1138:31, :1139:46]
wire _T_1115 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1115; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1115; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [7:0] source; // @[Monitor.scala:390:22]
reg [20:0] address; // @[Monitor.scala:391:22]
wire _T_1183 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1183; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1183; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1183; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [7:0] source_1; // @[Monitor.scala:541:22]
reg [128:0] inflight; // @[Monitor.scala:614:27]
reg [515:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [515:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [128:0] a_set; // @[Monitor.scala:626:34]
wire [128:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [515:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [515:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [10:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [10:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [10:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [10:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [10:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [10:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [10:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [10:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [10:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [515:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [515:0] _a_opcode_lookup_T_6 = {512'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [515:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[515:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [515:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [515:0] _a_size_lookup_T_6 = {512'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [515:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[515:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [255:0] _GEN_2 = 256'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [255:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [255:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire _T_1048 = _T_1115 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_1048 ? _a_set_T[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_1048 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_1048 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [10:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [10:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [10:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [2050:0] _a_opcodes_set_T_1 = {2047'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_1048 ? _a_opcodes_set_T_1[515:0] : 516'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [2050:0] _a_sizes_set_T_1 = {2047'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_1048 ? _a_sizes_set_T_1[515:0] : 516'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [128:0] d_clr; // @[Monitor.scala:664:34]
wire [128:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [515:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [515:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_1094 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [255:0] _GEN_5 = 256'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [255:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1094 & ~d_release_ack ? _d_clr_wo_ready_T[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire _T_1063 = _T_1183 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_1063 ? _d_clr_T[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire [2062:0] _d_opcodes_clr_T_5 = 2063'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_1063 ? _d_opcodes_clr_T_5[515:0] : 516'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [2062:0] _d_sizes_clr_T_5 = 2063'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_1063 ? _d_sizes_clr_T_5[515:0] : 516'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [128:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [128:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [128:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [515:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [515:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [515:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [515:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [515:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [515:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [128:0] inflight_1; // @[Monitor.scala:726:35]
wire [128:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [515:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [515:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [515:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [515:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [515:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [515:0] _c_opcode_lookup_T_6 = {512'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [515:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[515:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [515:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [515:0] _c_size_lookup_T_6 = {512'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [515:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[515:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [128:0] d_clr_1; // @[Monitor.scala:774:34]
wire [128:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [515:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [515:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1159 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1159 & d_release_ack_1 ? _d_clr_wo_ready_T_1[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire _T_1141 = _T_1183 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1141 ? _d_clr_T_1[128:0] : 129'h0; // @[OneHot.scala:58:35]
wire [2062:0] _d_opcodes_clr_T_11 = 2063'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1141 ? _d_opcodes_clr_T_11[515:0] : 516'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [2062:0] _d_sizes_clr_T_11 = 2063'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1141 ? _d_sizes_clr_T_11[515:0] : 516'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 8'h0; // @[Monitor.scala:36:7, :795:113]
wire [128:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [128:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [515:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [515:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [515:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [515:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
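// Editorial usage sketch (not part of the original file): pipelines a hypothetical
// 8-bit input through two register stages, each reset to zero; the stages pick up
// the suggested names pipe_1 and pipe_0 (pipe_0 drives the output).
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(8.W))
    val q = Output(UInt(8.W))
  })
  io.q := ShiftRegInit(io.d, n = 2, init = 0.U(8.W), name = Some("pipe"))
}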
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
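// Editorial usage sketch (not part of the original file): delays a hypothetical 1-bit
// signal by three asynchronously reset stages, e.g. to give backend flows a fixed
// register chain to constrain for timing.
class AsyncResetShiftRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  // Positional arguments: input, depth = 3, init = 0, suggested instance name.
  io.q := AsyncResetShiftReg(io.d, 3, 0, Some("timing_pipe"))
}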
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
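// Editorial usage sketch (not part of the original file): a conventional 3-deep,
// init-0 synchronizer for a hypothetical asynchronous input bit -- the same w=1, d=3,
// i=0 configuration as the module generated further below.
class AsyncResetSynchronizerExample extends Module {
  val io = IO(new Bundle {
    val async_d = Input(Bool())
    val sync_q  = Output(Bool())
  })
  // Positional arguments: input, sync depth = 3, init = 0, suggested instance name.
  io.sync_q := AsyncResetSynchronizerShiftReg(io.async_d, 3, 0, Some("sync"))
}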
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
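// Editorial usage sketch (not part of the original file): captures a hypothetical
// multi-bit bus into this clock domain when an already-synchronized enable is high.
class ClockCrossingRegExample extends Module {
  val io = IO(new Bundle {
    val bus_d  = Input(UInt(8.W))
    val bus_en = Input(Bool())
    val bus_q  = Output(UInt(8.W))
  })
  io.bus_q := ClockCrossingReg(io.bus_d, io.bus_en, doInit = true, name = Some("bus_cdc"))
}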
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_96( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_156 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File InputUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{FlowRoutingBundle}
import constellation.noc.{HasNoCParams}
class AbstractInputUnitIO(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams],
)(implicit val p: Parameters) extends Bundle with HasRouterOutputParams {
val nodeId = cParam.destId
val router_req = Decoupled(new RouteComputerReq)
val router_resp = Input(new RouteComputerResp(outParams, egressParams))
val vcalloc_req = Decoupled(new VCAllocReq(cParam, outParams, egressParams))
val vcalloc_resp = Input(new VCAllocResp(outParams, egressParams))
val out_credit_available = Input(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) }))
val salloc_req = Vec(cParam.destSpeedup, Decoupled(new SwitchAllocReq(outParams, egressParams)))
val out = Vec(cParam.destSpeedup, Valid(new SwitchBundle(outParams, egressParams)))
val debug = Output(new Bundle {
val va_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
val sa_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
})
val block = Input(Bool())
}
abstract class AbstractInputUnit(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module with HasRouterOutputParams with HasNoCParams {
val nodeId = cParam.destId
def io: AbstractInputUnitIO
}
class InputBuffer(cParam: ChannelParams)(implicit p: Parameters) extends Module {
val nVirtualChannels = cParam.nVirtualChannels
val io = IO(new Bundle {
val enq = Flipped(Vec(cParam.srcSpeedup, Valid(new Flit(cParam.payloadBits))))
val deq = Vec(cParam.nVirtualChannels, Decoupled(new BaseFlit(cParam.payloadBits)))
})
val useOutputQueues = cParam.useOutputQueues
val delims = if (useOutputQueues) {
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize else 0).scanLeft(0)(_+_)
} else {
// If no queuing, have to add an additional slot since head == tail implies empty
// TODO this should be fixed, should use all slots available
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize + 1 else 0).scanLeft(0)(_+_)
}
val starts = delims.dropRight(1).zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val ends = delims.tail.zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val fullSize = delims.last
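  // Editorial worked example (not in the original source): with three traversable VCs of
  // bufferSize 2, 3 and 1 and useOutputQueues = true,
  //   delims   = Seq(0, 2, 5, 6)
  //   starts   = Seq(0, 2, 5)
  //   ends     = Seq(2, 5, 6)
  //   fullSize = 6
  // so each VC owns the half-open slot range [start, end) of the shared flit memory.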
// Ugly case. Use multiple queues
if ((cParam.srcSpeedup > 1 || cParam.destSpeedup > 1 || fullSize <= 1) || !cParam.unifiedBuffer) {
require(useOutputQueues)
val qs = cParam.virtualChannelParams.map(v => Module(new Queue(new BaseFlit(cParam.payloadBits), v.bufferSize)))
qs.zipWithIndex.foreach { case (q,i) =>
val sel = io.enq.map(f => f.valid && f.bits.virt_channel_id === i.U)
q.io.enq.valid := sel.orR
q.io.enq.bits.head := Mux1H(sel, io.enq.map(_.bits.head))
q.io.enq.bits.tail := Mux1H(sel, io.enq.map(_.bits.tail))
q.io.enq.bits.payload := Mux1H(sel, io.enq.map(_.bits.payload))
io.deq(i) <> q.io.deq
}
} else {
val mem = Mem(fullSize, new BaseFlit(cParam.payloadBits))
val heads = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val tails = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val empty = (heads zip tails).map(t => t._1 === t._2)
val qs = Seq.fill(nVirtualChannels) { Module(new Queue(new BaseFlit(cParam.payloadBits), 1, pipe=true)) }
qs.foreach(_.io.enq.valid := false.B)
qs.foreach(_.io.enq.bits := DontCare)
val vc_sel = UIntToOH(io.enq(0).bits.virt_channel_id)
val flit = Wire(new BaseFlit(cParam.payloadBits))
val direct_to_q = (Mux1H(vc_sel, qs.map(_.io.enq.ready)) && Mux1H(vc_sel, empty)) && useOutputQueues.B
flit.head := io.enq(0).bits.head
flit.tail := io.enq(0).bits.tail
flit.payload := io.enq(0).bits.payload
when (io.enq(0).valid && !direct_to_q) {
val tail = tails(io.enq(0).bits.virt_channel_id)
mem.write(tail, flit)
tails(io.enq(0).bits.virt_channel_id) := Mux(
tail === Mux1H(vc_sel, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(vc_sel, starts.map(_.U)),
tail + 1.U)
} .elsewhen (io.enq(0).valid && direct_to_q) {
for (i <- 0 until nVirtualChannels) {
when (io.enq(0).bits.virt_channel_id === i.U) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := flit
}
}
}
if (useOutputQueues) {
val can_to_q = (0 until nVirtualChannels).map { i => !empty(i) && qs(i).io.enq.ready }
val to_q_oh = PriorityEncoderOH(can_to_q)
val to_q = OHToUInt(to_q_oh)
when (can_to_q.orR) {
val head = Mux1H(to_q_oh, heads)
heads(to_q) := Mux(
head === Mux1H(to_q_oh, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(to_q_oh, starts.map(_.U)),
head + 1.U)
for (i <- 0 until nVirtualChannels) {
when (to_q_oh(i)) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := mem.read(head)
}
}
}
for (i <- 0 until nVirtualChannels) {
io.deq(i) <> qs(i).io.deq
}
} else {
qs.map(_.io.deq.ready := false.B)
val ready_sel = io.deq.map(_.ready)
val fire = io.deq.map(_.fire)
assert(PopCount(fire) <= 1.U)
val head = Mux1H(fire, heads)
when (fire.orR) {
val fire_idx = OHToUInt(fire)
heads(fire_idx) := Mux(
head === Mux1H(fire, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(fire, starts.map(_.U)),
head + 1.U)
}
val read_flit = mem.read(head)
for (i <- 0 until nVirtualChannels) {
io.deq(i).valid := !empty(i)
io.deq(i).bits := read_flit
}
}
}
}
class InputUnit(cParam: ChannelParams, outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean, combineSAST: Boolean
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
val nVirtualChannels = cParam.nVirtualChannels
val virtualChannelParams = cParam.virtualChannelParams
class InputUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(new Channel(cParam.asInstanceOf[ChannelParams]))
}
val io = IO(new InputUnitIO)
val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5)
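  // Editorial note (not in the original source) on the per-VC state encoding:
  //   g_i - idle, waiting for a head flit
  //   g_r - requesting the route computer
  //   g_v - requesting virtual-channel allocation
  //   g_a - active: requesting switch allocation until the tail flit departs
  //   g_c is part of the encoding but is not referenced elsewhere in this unit.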
class InputState extends Bundle {
val g = UInt(3.W)
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
val flow = new FlowRoutingBundle
val fifo_deps = UInt(nVirtualChannels.W)
}
val input_buffer = Module(new InputBuffer(cParam))
for (i <- 0 until cParam.srcSpeedup) {
input_buffer.io.enq(i) := io.in.flit(i)
}
input_buffer.io.deq.foreach(_.ready := false.B)
val route_arbiter = Module(new Arbiter(
new RouteComputerReq, nVirtualChannels
))
io.router_req <> route_arbiter.io.out
val states = Reg(Vec(nVirtualChannels, new InputState))
val anyFifo = cParam.possibleFlows.map(_.fifo).reduce(_||_)
val allFifo = cParam.possibleFlows.map(_.fifo).reduce(_&&_)
if (anyFifo) {
val idle_mask = VecInit(states.map(_.g === g_i)).asUInt
for (s <- states)
for (i <- 0 until nVirtualChannels)
s.fifo_deps := s.fifo_deps & ~idle_mask
}
for (i <- 0 until cParam.srcSpeedup) {
when (io.in.flit(i).fire && io.in.flit(i).bits.head) {
val id = io.in.flit(i).bits.virt_channel_id
assert(id < nVirtualChannels.U)
assert(states(id).g === g_i)
val at_dest = io.in.flit(i).bits.flow.egress_node === nodeId.U
states(id).g := Mux(at_dest, g_v, g_r)
states(id).vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (o.U === io.in.flit(i).bits.flow.egress_node_id) {
states(id).vc_sel(o+nOutputs)(0) := true.B
}
}
states(id).flow := io.in.flit(i).bits.flow
if (anyFifo) {
val fifo = cParam.possibleFlows.filter(_.fifo).map(_.isFlow(io.in.flit(i).bits.flow)).toSeq.orR
states(id).fifo_deps := VecInit(states.zipWithIndex.map { case (s, j) =>
s.g =/= g_i && s.flow.asUInt === io.in.flit(i).bits.flow.asUInt && j.U =/= id
}).asUInt
}
}
}
(route_arbiter.io.in zip states).zipWithIndex.map { case ((i,s),idx) =>
if (virtualChannelParams(idx).traversable) {
i.valid := s.g === g_r
i.bits.flow := s.flow
i.bits.src_virt_id := idx.U
when (i.fire) { s.g := g_v }
} else {
i.valid := false.B
i.bits := DontCare
}
}
when (io.router_req.fire) {
val id = io.router_req.bits.src_virt_id
assert(states(id).g === g_r)
states(id).g := g_v
for (i <- 0 until nVirtualChannels) {
when (i.U === id) {
states(i).vc_sel := io.router_resp.vc_sel
}
}
}
val mask = RegInit(0.U(nVirtualChannels.W))
val vcalloc_reqs = Wire(Vec(nVirtualChannels, new VCAllocReq(cParam, outParams, egressParams)))
val vcalloc_vals = Wire(Vec(nVirtualChannels, Bool()))
val vcalloc_filter = PriorityEncoderOH(Cat(vcalloc_vals.asUInt, vcalloc_vals.asUInt & ~mask))
val vcalloc_sel = vcalloc_filter(nVirtualChannels-1,0) | (vcalloc_filter >> nVirtualChannels)
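  // Editorial note (not in the original source): the doubled request vector implements a
  // rotating priority. The low half masks off VCs at or below the previous grant, so a
  // higher-indexed requester wins first; only when none exists does the unmasked copy in
  // the high half take effect. OR-ing the two halves back together yields a one-hot
  // select over the original VC indices.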
  // Prioritize incoming packets
when (io.router_req.fire) {
mask := (1.U << io.router_req.bits.src_virt_id) - 1.U
} .elsewhen (vcalloc_vals.orR) {
mask := Mux1H(vcalloc_sel, (0 until nVirtualChannels).map { w => ~(0.U((w+1).W)) })
}
io.vcalloc_req.valid := vcalloc_vals.orR
io.vcalloc_req.bits := Mux1H(vcalloc_sel, vcalloc_reqs)
states.zipWithIndex.map { case (s,idx) =>
if (virtualChannelParams(idx).traversable) {
vcalloc_vals(idx) := s.g === g_v && s.fifo_deps === 0.U
vcalloc_reqs(idx).in_vc := idx.U
vcalloc_reqs(idx).vc_sel := s.vc_sel
vcalloc_reqs(idx).flow := s.flow
when (vcalloc_vals(idx) && vcalloc_sel(idx) && io.vcalloc_req.ready) { s.g := g_a }
if (combineRCVA) {
when (route_arbiter.io.in(idx).fire) {
vcalloc_vals(idx) := true.B
vcalloc_reqs(idx).vc_sel := io.router_resp.vc_sel
}
}
} else {
vcalloc_vals(idx) := false.B
vcalloc_reqs(idx) := DontCare
}
}
io.debug.va_stall := PopCount(vcalloc_vals) - io.vcalloc_req.ready
when (io.vcalloc_req.fire) {
for (i <- 0 until nVirtualChannels) {
when (vcalloc_sel(i)) {
states(i).vc_sel := io.vcalloc_resp.vc_sel
states(i).g := g_a
if (!combineRCVA) {
assert(states(i).g === g_v)
}
}
}
}
val salloc_arb = Module(new SwitchArbiter(
nVirtualChannels,
cParam.destSpeedup,
outParams, egressParams
))
(states zip salloc_arb.io.in).zipWithIndex.map { case ((s,r),i) =>
if (virtualChannelParams(i).traversable) {
val credit_available = (s.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
r.valid := s.g === g_a && credit_available && input_buffer.io.deq(i).valid
r.bits.vc_sel := s.vc_sel
val deq_tail = input_buffer.io.deq(i).bits.tail
r.bits.tail := deq_tail
when (r.fire && deq_tail) {
s.g := g_i
}
input_buffer.io.deq(i).ready := r.ready
} else {
r.valid := false.B
r.bits := DontCare
}
}
io.debug.sa_stall := PopCount(salloc_arb.io.in.map(r => r.valid && !r.ready))
io.salloc_req <> salloc_arb.io.out
when (io.block) {
salloc_arb.io.out.foreach(_.ready := false.B)
io.salloc_req.foreach(_.valid := false.B)
}
class OutBundle extends Bundle {
val valid = Bool()
val vid = UInt(virtualChannelBits.W)
val out_vid = UInt(log2Up(allOutParams.map(_.nVirtualChannels).max).W)
val flit = new Flit(cParam.payloadBits)
}
val salloc_outs = if (combineSAST) {
Wire(Vec(cParam.destSpeedup, new OutBundle))
} else {
Reg(Vec(cParam.destSpeedup, new OutBundle))
}
io.in.credit_return := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire, salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
io.in.vc_free := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire && Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)),
salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
for (i <- 0 until cParam.destSpeedup) {
val salloc_out = salloc_outs(i)
salloc_out.valid := salloc_arb.io.out(i).fire
salloc_out.vid := OHToUInt(salloc_arb.io.chosen_oh(i))
val vc_sel = Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.vc_sel))
val channel_oh = vc_sel.map(_.reduce(_||_)).toSeq
val virt_channel = Mux1H(channel_oh, vc_sel.map(v => OHToUInt(v)).toSeq)
when (salloc_arb.io.out(i).fire) {
salloc_out.out_vid := virt_channel
salloc_out.flit.payload := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.payload))
salloc_out.flit.head := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.head))
salloc_out.flit.tail := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail))
salloc_out.flit.flow := Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.flow))
} .otherwise {
salloc_out.out_vid := DontCare
salloc_out.flit := DontCare
}
salloc_out.flit.virt_channel_id := DontCare // this gets set in the switch
io.out(i).valid := salloc_out.valid
io.out(i).bits.flit := salloc_out.flit
io.out(i).bits.out_virt_channel := salloc_out.out_vid
}
def filterVCSel(sel: MixedVec[Vec[Bool]], srcV: Int) = {
if (virtualChannelParams(srcV).traversable) {
outParams.zipWithIndex.map { case (oP, oI) =>
(0 until oP.nVirtualChannels).map { oV =>
var allow = false
virtualChannelParams(srcV).possibleFlows.foreach { pI =>
allow = allow || routingRelation(
cParam.channelRoutingInfos(srcV),
oP.channelRoutingInfos(oV),
pI
)
}
if (!allow)
sel(oI)(oV) := false.B
}
}
}
}
(0 until nVirtualChannels).map { i =>
if (!virtualChannelParams(i).traversable) states(i) := DontCare
filterVCSel(states(i).vc_sel, i)
}
when (reset.asBool) {
states.foreach(_.g := g_i)
}
}
| module InputBuffer_7( // @[InputUnit.scala:49:7]
input clock, // @[InputUnit.scala:49:7]
input reset, // @[InputUnit.scala:49:7]
input io_enq_0_valid, // @[InputUnit.scala:51:14]
input io_enq_0_bits_head, // @[InputUnit.scala:51:14]
input io_enq_0_bits_tail, // @[InputUnit.scala:51:14]
input [72:0] io_enq_0_bits_payload, // @[InputUnit.scala:51:14]
input [4:0] io_enq_0_bits_virt_channel_id, // @[InputUnit.scala:51:14]
output io_deq_0_bits_head, // @[InputUnit.scala:51:14]
output io_deq_0_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_0_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_1_bits_head, // @[InputUnit.scala:51:14]
output io_deq_1_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_1_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_2_bits_head, // @[InputUnit.scala:51:14]
output io_deq_2_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_2_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_3_bits_head, // @[InputUnit.scala:51:14]
output io_deq_3_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_3_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_4_bits_head, // @[InputUnit.scala:51:14]
output io_deq_4_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_4_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_5_bits_head, // @[InputUnit.scala:51:14]
output io_deq_5_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_5_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_6_bits_head, // @[InputUnit.scala:51:14]
output io_deq_6_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_6_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_7_bits_head, // @[InputUnit.scala:51:14]
output io_deq_7_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_7_bits_payload, // @[InputUnit.scala:51:14]
input io_deq_8_ready, // @[InputUnit.scala:51:14]
output io_deq_8_valid, // @[InputUnit.scala:51:14]
output io_deq_8_bits_head, // @[InputUnit.scala:51:14]
output io_deq_8_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_8_bits_payload, // @[InputUnit.scala:51:14]
input io_deq_9_ready, // @[InputUnit.scala:51:14]
output io_deq_9_valid, // @[InputUnit.scala:51:14]
output io_deq_9_bits_head, // @[InputUnit.scala:51:14]
output io_deq_9_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_9_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_10_bits_head, // @[InputUnit.scala:51:14]
output io_deq_10_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_10_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_11_bits_head, // @[InputUnit.scala:51:14]
output io_deq_11_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_11_bits_payload, // @[InputUnit.scala:51:14]
input io_deq_12_ready, // @[InputUnit.scala:51:14]
output io_deq_12_valid, // @[InputUnit.scala:51:14]
output io_deq_12_bits_head, // @[InputUnit.scala:51:14]
output io_deq_12_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_12_bits_payload, // @[InputUnit.scala:51:14]
input io_deq_13_ready, // @[InputUnit.scala:51:14]
output io_deq_13_valid, // @[InputUnit.scala:51:14]
output io_deq_13_bits_head, // @[InputUnit.scala:51:14]
output io_deq_13_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_13_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_14_bits_head, // @[InputUnit.scala:51:14]
output io_deq_14_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_14_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_15_bits_head, // @[InputUnit.scala:51:14]
output io_deq_15_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_15_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_16_bits_head, // @[InputUnit.scala:51:14]
output io_deq_16_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_16_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_17_bits_head, // @[InputUnit.scala:51:14]
output io_deq_17_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_17_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_18_bits_head, // @[InputUnit.scala:51:14]
output io_deq_18_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_18_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_19_bits_head, // @[InputUnit.scala:51:14]
output io_deq_19_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_19_bits_payload, // @[InputUnit.scala:51:14]
input io_deq_20_ready, // @[InputUnit.scala:51:14]
output io_deq_20_valid, // @[InputUnit.scala:51:14]
output io_deq_20_bits_head, // @[InputUnit.scala:51:14]
output io_deq_20_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_20_bits_payload, // @[InputUnit.scala:51:14]
input io_deq_21_ready, // @[InputUnit.scala:51:14]
output io_deq_21_valid, // @[InputUnit.scala:51:14]
output io_deq_21_bits_head, // @[InputUnit.scala:51:14]
output io_deq_21_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_21_bits_payload // @[InputUnit.scala:51:14]
);
wire _qs_21_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_20_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_19_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_18_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_17_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_16_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_15_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_14_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_13_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_12_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_11_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_10_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_9_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_8_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_7_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_6_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_5_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_4_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_3_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_2_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_1_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_0_io_enq_ready; // @[InputUnit.scala:90:49]
wire [74:0] _mem_ext_R0_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R1_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R2_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R3_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R4_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R5_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R6_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R7_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R8_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R9_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R10_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R11_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R12_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R13_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R14_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R15_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R16_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R17_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R18_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R19_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R20_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R21_data; // @[InputUnit.scala:85:18]
reg [4:0] heads_0; // @[InputUnit.scala:86:24]
reg [4:0] heads_1; // @[InputUnit.scala:86:24]
reg [4:0] heads_2; // @[InputUnit.scala:86:24]
reg [4:0] heads_3; // @[InputUnit.scala:86:24]
reg [4:0] heads_4; // @[InputUnit.scala:86:24]
reg [4:0] heads_5; // @[InputUnit.scala:86:24]
reg [4:0] heads_6; // @[InputUnit.scala:86:24]
reg [4:0] heads_7; // @[InputUnit.scala:86:24]
reg [4:0] heads_8; // @[InputUnit.scala:86:24]
reg [4:0] heads_9; // @[InputUnit.scala:86:24]
reg [4:0] heads_10; // @[InputUnit.scala:86:24]
reg [4:0] heads_11; // @[InputUnit.scala:86:24]
reg [4:0] heads_12; // @[InputUnit.scala:86:24]
reg [4:0] heads_13; // @[InputUnit.scala:86:24]
reg [4:0] heads_14; // @[InputUnit.scala:86:24]
reg [4:0] heads_15; // @[InputUnit.scala:86:24]
reg [4:0] heads_16; // @[InputUnit.scala:86:24]
reg [4:0] heads_17; // @[InputUnit.scala:86:24]
reg [4:0] heads_18; // @[InputUnit.scala:86:24]
reg [4:0] heads_19; // @[InputUnit.scala:86:24]
reg [4:0] heads_20; // @[InputUnit.scala:86:24]
reg [4:0] heads_21; // @[InputUnit.scala:86:24]
reg [4:0] tails_0; // @[InputUnit.scala:87:24]
reg [4:0] tails_1; // @[InputUnit.scala:87:24]
reg [4:0] tails_2; // @[InputUnit.scala:87:24]
reg [4:0] tails_3; // @[InputUnit.scala:87:24]
reg [4:0] tails_4; // @[InputUnit.scala:87:24]
reg [4:0] tails_5; // @[InputUnit.scala:87:24]
reg [4:0] tails_6; // @[InputUnit.scala:87:24]
reg [4:0] tails_7; // @[InputUnit.scala:87:24]
reg [4:0] tails_8; // @[InputUnit.scala:87:24]
reg [4:0] tails_9; // @[InputUnit.scala:87:24]
reg [4:0] tails_10; // @[InputUnit.scala:87:24]
reg [4:0] tails_11; // @[InputUnit.scala:87:24]
reg [4:0] tails_12; // @[InputUnit.scala:87:24]
reg [4:0] tails_13; // @[InputUnit.scala:87:24]
reg [4:0] tails_14; // @[InputUnit.scala:87:24]
reg [4:0] tails_15; // @[InputUnit.scala:87:24]
reg [4:0] tails_16; // @[InputUnit.scala:87:24]
reg [4:0] tails_17; // @[InputUnit.scala:87:24]
reg [4:0] tails_18; // @[InputUnit.scala:87:24]
reg [4:0] tails_19; // @[InputUnit.scala:87:24]
reg [4:0] tails_20; // @[InputUnit.scala:87:24]
reg [4:0] tails_21; // @[InputUnit.scala:87:24]
wire _tails_T_66 = io_enq_0_bits_virt_channel_id == 5'h0; // @[Mux.scala:32:36]
wire _tails_T_67 = io_enq_0_bits_virt_channel_id == 5'h1; // @[Mux.scala:32:36]
wire _tails_T_68 = io_enq_0_bits_virt_channel_id == 5'h2; // @[Mux.scala:32:36]
wire _tails_T_69 = io_enq_0_bits_virt_channel_id == 5'h3; // @[Mux.scala:32:36]
wire _tails_T_70 = io_enq_0_bits_virt_channel_id == 5'h4; // @[Mux.scala:32:36]
wire _tails_T_71 = io_enq_0_bits_virt_channel_id == 5'h5; // @[Mux.scala:32:36]
wire _tails_T_72 = io_enq_0_bits_virt_channel_id == 5'h6; // @[Mux.scala:32:36]
wire _tails_T_73 = io_enq_0_bits_virt_channel_id == 5'h7; // @[Mux.scala:32:36]
wire _tails_T_74 = io_enq_0_bits_virt_channel_id == 5'h8; // @[Mux.scala:32:36]
wire _tails_T_75 = io_enq_0_bits_virt_channel_id == 5'h9; // @[Mux.scala:32:36]
wire _tails_T_76 = io_enq_0_bits_virt_channel_id == 5'hA; // @[Mux.scala:32:36]
wire _tails_T_77 = io_enq_0_bits_virt_channel_id == 5'hB; // @[Mux.scala:32:36]
wire _tails_T_78 = io_enq_0_bits_virt_channel_id == 5'hC; // @[Mux.scala:32:36]
wire _tails_T_79 = io_enq_0_bits_virt_channel_id == 5'hD; // @[Mux.scala:32:36]
wire _tails_T_80 = io_enq_0_bits_virt_channel_id == 5'hE; // @[Mux.scala:32:36]
wire _tails_T_81 = io_enq_0_bits_virt_channel_id == 5'hF; // @[Mux.scala:32:36]
wire _tails_T_82 = io_enq_0_bits_virt_channel_id == 5'h10; // @[Mux.scala:32:36]
wire _tails_T_83 = io_enq_0_bits_virt_channel_id == 5'h11; // @[Mux.scala:32:36]
wire _tails_T_84 = io_enq_0_bits_virt_channel_id == 5'h12; // @[Mux.scala:32:36]
wire _tails_T_85 = io_enq_0_bits_virt_channel_id == 5'h13; // @[Mux.scala:32:36]
wire _tails_T_86 = io_enq_0_bits_virt_channel_id == 5'h14; // @[Mux.scala:32:36]
wire _tails_T_87 = io_enq_0_bits_virt_channel_id == 5'h15; // @[Mux.scala:32:36]
wire direct_to_q = (_tails_T_66 & _qs_0_io_enq_ready | _tails_T_67 & _qs_1_io_enq_ready | _tails_T_68 & _qs_2_io_enq_ready | _tails_T_69 & _qs_3_io_enq_ready | _tails_T_70 & _qs_4_io_enq_ready | _tails_T_71 & _qs_5_io_enq_ready | _tails_T_72 & _qs_6_io_enq_ready | _tails_T_73 & _qs_7_io_enq_ready | _tails_T_74 & _qs_8_io_enq_ready | _tails_T_75 & _qs_9_io_enq_ready | _tails_T_76 & _qs_10_io_enq_ready | _tails_T_77 & _qs_11_io_enq_ready | _tails_T_78 & _qs_12_io_enq_ready | _tails_T_79 & _qs_13_io_enq_ready | _tails_T_80 & _qs_14_io_enq_ready | _tails_T_81 & _qs_15_io_enq_ready | _tails_T_82 & _qs_16_io_enq_ready | _tails_T_83 & _qs_17_io_enq_ready | _tails_T_84 & _qs_18_io_enq_ready | _tails_T_85 & _qs_19_io_enq_ready | _tails_T_86 & _qs_20_io_enq_ready | _tails_T_87 & _qs_21_io_enq_ready) & (_tails_T_66 & heads_0 == tails_0 | _tails_T_67 & heads_1 == tails_1 | _tails_T_68 & heads_2 == tails_2 | _tails_T_69 & heads_3 == tails_3 | _tails_T_70 & heads_4 == tails_4 | _tails_T_71 & heads_5 == tails_5 | _tails_T_72 & heads_6 == tails_6 | _tails_T_73 & heads_7 == tails_7 | _tails_T_74 & heads_8 == tails_8 | _tails_T_75 & heads_9 == tails_9 | _tails_T_76 & heads_10 == tails_10 | _tails_T_77 & heads_11 == tails_11 | _tails_T_78 & heads_12 == tails_12 | _tails_T_79 & heads_13 == tails_13 | _tails_T_80 & heads_14 == tails_14 | _tails_T_81 & heads_15 == tails_15 | _tails_T_82 & heads_16 == tails_16 | _tails_T_83 & heads_17 == tails_17 | _tails_T_84 & heads_18 == tails_18 | _tails_T_85 & heads_19 == tails_19 | _tails_T_86 & heads_20 == tails_20 | _tails_T_87 & heads_21 == tails_21); // @[Mux.scala:30:73, :32:36]
wire mem_MPORT_en = io_enq_0_valid & ~direct_to_q; // @[InputUnit.scala:96:62, :100:{27,30}]
wire [31:0][4:0] _GEN = {{tails_0}, {tails_0}, {tails_0}, {tails_0}, {tails_0}, {tails_0}, {tails_0}, {tails_0}, {tails_0}, {tails_0}, {tails_21}, {tails_20}, {tails_19}, {tails_18}, {tails_17}, {tails_16}, {tails_15}, {tails_14}, {tails_13}, {tails_12}, {tails_11}, {tails_10}, {tails_9}, {tails_8}, {tails_7}, {tails_6}, {tails_5}, {tails_4}, {tails_3}, {tails_2}, {tails_1}, {tails_0}}; // @[InputUnit.scala:87:24, :102:16]
wire _GEN_0 = io_enq_0_bits_virt_channel_id == 5'h0; // @[InputUnit.scala:103:45]
wire _GEN_1 = io_enq_0_bits_virt_channel_id == 5'h1; // @[InputUnit.scala:103:45]
wire _GEN_2 = io_enq_0_bits_virt_channel_id == 5'h2; // @[InputUnit.scala:103:45]
wire _GEN_3 = io_enq_0_bits_virt_channel_id == 5'h3; // @[InputUnit.scala:103:45]
wire _GEN_4 = io_enq_0_bits_virt_channel_id == 5'h4; // @[InputUnit.scala:103:45]
wire _GEN_5 = io_enq_0_bits_virt_channel_id == 5'h5; // @[InputUnit.scala:103:45]
wire _GEN_6 = io_enq_0_bits_virt_channel_id == 5'h6; // @[InputUnit.scala:103:45]
wire _GEN_7 = io_enq_0_bits_virt_channel_id == 5'h7; // @[InputUnit.scala:103:45]
wire _GEN_8 = io_enq_0_bits_virt_channel_id == 5'h8; // @[InputUnit.scala:103:45]
wire _GEN_9 = io_enq_0_bits_virt_channel_id == 5'h9; // @[InputUnit.scala:103:45]
wire _GEN_10 = io_enq_0_bits_virt_channel_id == 5'hA; // @[InputUnit.scala:103:45]
wire _GEN_11 = io_enq_0_bits_virt_channel_id == 5'hB; // @[InputUnit.scala:103:45]
wire _GEN_12 = io_enq_0_bits_virt_channel_id == 5'hC; // @[InputUnit.scala:103:45]
wire _GEN_13 = io_enq_0_bits_virt_channel_id == 5'hD; // @[InputUnit.scala:103:45]
wire _GEN_14 = io_enq_0_bits_virt_channel_id == 5'hE; // @[InputUnit.scala:103:45]
wire _GEN_15 = io_enq_0_bits_virt_channel_id == 5'hF; // @[InputUnit.scala:103:45]
wire _GEN_16 = io_enq_0_bits_virt_channel_id == 5'h10; // @[InputUnit.scala:103:45]
wire _GEN_17 = io_enq_0_bits_virt_channel_id == 5'h11; // @[InputUnit.scala:103:45]
wire _GEN_18 = io_enq_0_bits_virt_channel_id == 5'h12; // @[InputUnit.scala:103:45]
wire _GEN_19 = io_enq_0_bits_virt_channel_id == 5'h13; // @[InputUnit.scala:103:45]
wire _GEN_20 = io_enq_0_bits_virt_channel_id == 5'h14; // @[InputUnit.scala:103:45]
wire _GEN_21 = io_enq_0_bits_virt_channel_id == 5'h15; // @[InputUnit.scala:103:45]
wire _GEN_22 = io_enq_0_valid & direct_to_q; // @[InputUnit.scala:96:62, :107:34]
wire can_to_q_0 = heads_0 != tails_0 & _qs_0_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_1 = heads_1 != tails_1 & _qs_1_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_2 = heads_2 != tails_2 & _qs_2_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_3 = heads_3 != tails_3 & _qs_3_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_4 = heads_4 != tails_4 & _qs_4_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_5 = heads_5 != tails_5 & _qs_5_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_6 = heads_6 != tails_6 & _qs_6_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_7 = heads_7 != tails_7 & _qs_7_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_8 = heads_8 != tails_8 & _qs_8_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_9 = heads_9 != tails_9 & _qs_9_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_10 = heads_10 != tails_10 & _qs_10_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_11 = heads_11 != tails_11 & _qs_11_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_12 = heads_12 != tails_12 & _qs_12_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_13 = heads_13 != tails_13 & _qs_13_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_14 = heads_14 != tails_14 & _qs_14_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_15 = heads_15 != tails_15 & _qs_15_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_16 = heads_16 != tails_16 & _qs_16_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_17 = heads_17 != tails_17 & _qs_17_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_18 = heads_18 != tails_18 & _qs_18_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_19 = heads_19 != tails_19 & _qs_19_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_20 = heads_20 != tails_20 & _qs_20_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_21 = heads_21 != tails_21 & _qs_21_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire [21:0] to_q_oh_enc = can_to_q_0 ? 22'h1 : can_to_q_1 ? 22'h2 : can_to_q_2 ? 22'h4 : can_to_q_3 ? 22'h8 : can_to_q_4 ? 22'h10 : can_to_q_5 ? 22'h20 : can_to_q_6 ? 22'h40 : can_to_q_7 ? 22'h80 : can_to_q_8 ? 22'h100 : can_to_q_9 ? 22'h200 : can_to_q_10 ? 22'h400 : can_to_q_11 ? 22'h800 : can_to_q_12 ? 22'h1000 : can_to_q_13 ? 22'h2000 : can_to_q_14 ? 22'h4000 : can_to_q_15 ? 22'h8000 : can_to_q_16 ? 22'h10000 : can_to_q_17 ? 22'h20000 : can_to_q_18 ? 22'h40000 : can_to_q_19 ? 22'h80000 : can_to_q_20 ? 22'h100000 : {can_to_q_21, 21'h0}; // @[Mux.scala:50:70]
wire _GEN_23 = can_to_q_0 | can_to_q_1 | can_to_q_2 | can_to_q_3 | can_to_q_4 | can_to_q_5 | can_to_q_6 | can_to_q_7 | can_to_q_8 | can_to_q_9 | can_to_q_10 | can_to_q_11 | can_to_q_12 | can_to_q_13 | can_to_q_14 | can_to_q_15 | can_to_q_16 | can_to_q_17 | can_to_q_18 | can_to_q_19 | can_to_q_20 | can_to_q_21; // @[package.scala:81:59]
wire [4:0] head = (to_q_oh_enc[0] ? heads_0 : 5'h0) | (to_q_oh_enc[1] ? heads_1 : 5'h0) | (to_q_oh_enc[2] ? heads_2 : 5'h0) | (to_q_oh_enc[3] ? heads_3 : 5'h0) | (to_q_oh_enc[4] ? heads_4 : 5'h0) | (to_q_oh_enc[5] ? heads_5 : 5'h0) | (to_q_oh_enc[6] ? heads_6 : 5'h0) | (to_q_oh_enc[7] ? heads_7 : 5'h0) | (to_q_oh_enc[8] ? heads_8 : 5'h0) | (to_q_oh_enc[9] ? heads_9 : 5'h0) | (to_q_oh_enc[10] ? heads_10 : 5'h0) | (to_q_oh_enc[11] ? heads_11 : 5'h0) | (to_q_oh_enc[12] ? heads_12 : 5'h0) | (to_q_oh_enc[13] ? heads_13 : 5'h0) | (to_q_oh_enc[14] ? heads_14 : 5'h0) | (to_q_oh_enc[15] ? heads_15 : 5'h0) | (to_q_oh_enc[16] ? heads_16 : 5'h0) | (to_q_oh_enc[17] ? heads_17 : 5'h0) | (to_q_oh_enc[18] ? heads_18 : 5'h0) | (to_q_oh_enc[19] ? heads_19 : 5'h0) | (to_q_oh_enc[20] ? heads_20 : 5'h0) | (to_q_oh_enc[21] ? heads_21 : 5'h0); // @[OneHot.scala:83:30]
wire _GEN_24 = _GEN_23 & to_q_oh_enc[0]; // @[OneHot.scala:83:30]
wire _GEN_25 = _GEN_23 & to_q_oh_enc[1]; // @[OneHot.scala:83:30]
wire _GEN_26 = _GEN_23 & to_q_oh_enc[2]; // @[OneHot.scala:83:30]
wire _GEN_27 = _GEN_23 & to_q_oh_enc[3]; // @[OneHot.scala:83:30]
wire _GEN_28 = _GEN_23 & to_q_oh_enc[4]; // @[OneHot.scala:83:30]
wire _GEN_29 = _GEN_23 & to_q_oh_enc[5]; // @[OneHot.scala:83:30]
wire _GEN_30 = _GEN_23 & to_q_oh_enc[6]; // @[OneHot.scala:83:30]
wire _GEN_31 = _GEN_23 & to_q_oh_enc[7]; // @[OneHot.scala:83:30]
wire _GEN_32 = _GEN_23 & to_q_oh_enc[8]; // @[OneHot.scala:83:30]
wire _GEN_33 = _GEN_23 & to_q_oh_enc[9]; // @[OneHot.scala:83:30]
wire _GEN_34 = _GEN_23 & to_q_oh_enc[10]; // @[OneHot.scala:83:30]
wire _GEN_35 = _GEN_23 & to_q_oh_enc[11]; // @[OneHot.scala:83:30]
wire _GEN_36 = _GEN_23 & to_q_oh_enc[12]; // @[OneHot.scala:83:30]
wire _GEN_37 = _GEN_23 & to_q_oh_enc[13]; // @[OneHot.scala:83:30]
wire _GEN_38 = _GEN_23 & to_q_oh_enc[14]; // @[OneHot.scala:83:30]
wire _GEN_39 = _GEN_23 & to_q_oh_enc[15]; // @[OneHot.scala:83:30]
wire _GEN_40 = _GEN_23 & to_q_oh_enc[16]; // @[OneHot.scala:83:30]
wire _GEN_41 = _GEN_23 & to_q_oh_enc[17]; // @[OneHot.scala:83:30]
wire _GEN_42 = _GEN_23 & to_q_oh_enc[18]; // @[OneHot.scala:83:30]
wire _GEN_43 = _GEN_23 & to_q_oh_enc[19]; // @[OneHot.scala:83:30]
wire _GEN_44 = _GEN_23 & to_q_oh_enc[20]; // @[OneHot.scala:83:30]
wire _GEN_45 = _GEN_23 & to_q_oh_enc[21]; // @[OneHot.scala:83:30]
wire [4:0] _tails_T_133 = _GEN[io_enq_0_bits_virt_channel_id] == ({1'h0, {1'h0, {1'h0, {2{_tails_T_74}}} | {3{_tails_T_75}}} | (_tails_T_78 ? 4'hB : 4'h0) | {4{_tails_T_79}}} | (_tails_T_86 ? 5'h13 : 5'h0) | (_tails_T_87 ? 5'h17 : 5'h0)) ? {_tails_T_86, {_tails_T_78, _tails_T_75, 2'h0} | (_tails_T_79 ? 4'hC : 4'h0)} | (_tails_T_87 ? 5'h14 : 5'h0) : _GEN[io_enq_0_bits_virt_channel_id] + 5'h1; // @[Mux.scala:30:73, :32:36]
wire [14:0] _to_q_T_2 = {10'h0, to_q_oh_enc[21:17]} | to_q_oh_enc[15:1]; // @[OneHot.scala:31:18, :32:28]
wire [6:0] _to_q_T_4 = _to_q_T_2[14:8] | _to_q_T_2[6:0]; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [2:0] _to_q_T_6 = _to_q_T_4[6:4] | _to_q_T_4[2:0]; // @[OneHot.scala:30:18, :31:18, :32:28]
wire _to_q_T_8 = _to_q_T_6[2] | _to_q_T_6[0]; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [4:0] to_q = {|(to_q_oh_enc[21:16]), |(_to_q_T_2[14:7]), |(_to_q_T_4[6:3]), |(_to_q_T_6[2:1]), _to_q_T_8}; // @[OneHot.scala:30:18, :32:{10,14,28}]
wire [4:0] _heads_T_89 = head == ({1'h0, {1'h0, {1'h0, {2{to_q_oh_enc[8]}}} | {3{to_q_oh_enc[9]}}} | (to_q_oh_enc[12] ? 4'hB : 4'h0) | {4{to_q_oh_enc[13]}}} | (to_q_oh_enc[20] ? 5'h13 : 5'h0) | (to_q_oh_enc[21] ? 5'h17 : 5'h0)) ? {to_q_oh_enc[20], {to_q_oh_enc[12], to_q_oh_enc[9], 2'h0} | (to_q_oh_enc[13] ? 4'hC : 4'h0)} | (to_q_oh_enc[21] ? 5'h14 : 5'h0) : head + 5'h1; // @[OneHot.scala:83:30]
always @(posedge clock) begin // @[InputUnit.scala:49:7]
if (reset) begin // @[InputUnit.scala:49:7]
heads_0 <= 5'h0; // @[InputUnit.scala:86:24]
heads_1 <= 5'h0; // @[InputUnit.scala:86:24]
heads_2 <= 5'h0; // @[InputUnit.scala:86:24]
heads_3 <= 5'h0; // @[InputUnit.scala:86:24]
heads_4 <= 5'h0; // @[InputUnit.scala:86:24]
heads_5 <= 5'h0; // @[InputUnit.scala:86:24]
heads_6 <= 5'h0; // @[InputUnit.scala:86:24]
heads_7 <= 5'h0; // @[InputUnit.scala:86:24]
heads_8 <= 5'h0; // @[InputUnit.scala:86:24]
heads_9 <= 5'h4; // @[InputUnit.scala:86:24]
heads_10 <= 5'h0; // @[InputUnit.scala:86:24]
heads_11 <= 5'h0; // @[InputUnit.scala:86:24]
heads_12 <= 5'h8; // @[InputUnit.scala:86:24]
heads_13 <= 5'hC; // @[InputUnit.scala:86:24]
heads_14 <= 5'h0; // @[InputUnit.scala:86:24]
heads_15 <= 5'h0; // @[InputUnit.scala:86:24]
heads_16 <= 5'h0; // @[InputUnit.scala:86:24]
heads_17 <= 5'h0; // @[InputUnit.scala:86:24]
heads_18 <= 5'h0; // @[InputUnit.scala:86:24]
heads_19 <= 5'h0; // @[InputUnit.scala:86:24]
heads_20 <= 5'h10; // @[InputUnit.scala:86:24]
heads_21 <= 5'h14; // @[InputUnit.scala:86:24]
tails_0 <= 5'h0; // @[InputUnit.scala:87:24]
tails_1 <= 5'h0; // @[InputUnit.scala:87:24]
tails_2 <= 5'h0; // @[InputUnit.scala:87:24]
tails_3 <= 5'h0; // @[InputUnit.scala:87:24]
tails_4 <= 5'h0; // @[InputUnit.scala:87:24]
tails_5 <= 5'h0; // @[InputUnit.scala:87:24]
tails_6 <= 5'h0; // @[InputUnit.scala:87:24]
tails_7 <= 5'h0; // @[InputUnit.scala:87:24]
tails_8 <= 5'h0; // @[InputUnit.scala:87:24]
tails_9 <= 5'h4; // @[InputUnit.scala:87:24]
tails_10 <= 5'h0; // @[InputUnit.scala:87:24]
tails_11 <= 5'h0; // @[InputUnit.scala:87:24]
tails_12 <= 5'h8; // @[InputUnit.scala:87:24]
tails_13 <= 5'hC; // @[InputUnit.scala:87:24]
tails_14 <= 5'h0; // @[InputUnit.scala:87:24]
tails_15 <= 5'h0; // @[InputUnit.scala:87:24]
tails_16 <= 5'h0; // @[InputUnit.scala:87:24]
tails_17 <= 5'h0; // @[InputUnit.scala:87:24]
tails_18 <= 5'h0; // @[InputUnit.scala:87:24]
tails_19 <= 5'h0; // @[InputUnit.scala:87:24]
tails_20 <= 5'h10; // @[InputUnit.scala:87:24]
tails_21 <= 5'h14; // @[InputUnit.scala:87:24]
end
else begin // @[InputUnit.scala:49:7]
if (_GEN_23 & {to_q_oh_enc[21:16], |(_to_q_T_2[14:7]), |(_to_q_T_4[6:3]), |(_to_q_T_6[2:1]), _to_q_T_8} == 10'h0) // @[OneHot.scala:30:18, :32:{10,14,28}]
heads_0 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h1) // @[OneHot.scala:32:10]
heads_1 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h2) // @[OneHot.scala:32:10]
heads_2 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h3) // @[OneHot.scala:32:10]
heads_3 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h4) // @[OneHot.scala:32:10]
heads_4 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h5) // @[OneHot.scala:32:10]
heads_5 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h6) // @[OneHot.scala:32:10]
heads_6 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h7) // @[OneHot.scala:32:10]
heads_7 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h8) // @[OneHot.scala:32:10]
heads_8 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h9) // @[OneHot.scala:32:10]
heads_9 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'hA) // @[OneHot.scala:32:10]
heads_10 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'hB) // @[OneHot.scala:32:10]
heads_11 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'hC) // @[OneHot.scala:32:10]
heads_12 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'hD) // @[OneHot.scala:32:10]
heads_13 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'hE) // @[OneHot.scala:32:10]
heads_14 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'hF) // @[OneHot.scala:32:10]
heads_15 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h10) // @[OneHot.scala:32:10]
heads_16 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h11) // @[OneHot.scala:32:10]
heads_17 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h12) // @[OneHot.scala:32:10]
heads_18 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h13) // @[OneHot.scala:32:10]
heads_19 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h14) // @[OneHot.scala:32:10]
heads_20 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h15) // @[OneHot.scala:32:10]
heads_21 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (mem_MPORT_en & _GEN_0) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_0 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_1) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_1 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_2) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_2 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_3) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_3 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_4) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_4 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_5) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_5 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_6) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_6 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_7) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_7 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_8) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_8 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_9) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_9 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_10) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_10 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_11) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_11 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_12) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_12 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_13) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_13 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_14) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_14 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_15) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_15 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_16) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_16 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_17) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_17 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_18) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_18 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_19) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_19 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_20) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_20 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_21) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_21 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
end
  end // always @(posedge)
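The always block above maintains per-virtual-channel head/tail pointers into one shared flit buffer: an arriving flit is written at its VC's tail (mem_MPORT_en) unless that VC is empty and its per-VC queue can accept it directly (direct_to_q), and a buffered flit is drained by advancing the head of the VC picked by the can_to_q priority chain. A minimal, hypothetical Chisel sketch of that pointer bookkeeping (module and signal names are assumed for illustration; this is not the constellation InputUnit source):
import chisel3._
import chisel3.util._
class PerVCPointers(nVCs: Int, depthPerVC: Int) extends Module {
  private val entries = nVCs * depthPerVC
  private val ptrW    = log2Ceil(entries)
  val io = IO(new Bundle {
    val push   = Flipped(Valid(UInt(log2Ceil(nVCs).W))) // a flit stored for this VC
    val pop    = Flipped(Valid(UInt(log2Ceil(nVCs).W))) // a flit drained from this VC
    val empty  = Output(Vec(nVCs, Bool()))              // head === tail, as in direct_to_q above
    val wrAddr = Output(UInt(ptrW.W))                   // buffer slot the pushed flit would use
  })
  // VC i owns the contiguous slice [i*depthPerVC, (i+1)*depthPerVC) of the shared buffer.
  val starts = VecInit(Seq.tabulate(nVCs)(i => (i * depthPerVC).U(ptrW.W)))
  val ends   = VecInit(Seq.tabulate(nVCs)(i => ((i + 1) * depthPerVC - 1).U(ptrW.W)))
  val heads  = RegInit(VecInit(Seq.tabulate(nVCs)(i => (i * depthPerVC).U(ptrW.W))))
  val tails  = RegInit(VecInit(Seq.tabulate(nVCs)(i => (i * depthPerVC).U(ptrW.W))))
  // Wrap a pointer back to the start of its slice, mirroring the select-or-increment
  // expressions (_tails_T_133 / _heads_T_89) in the generated code above.
  def wrapIncr(ptr: UInt, vc: UInt): UInt = Mux(ptr === ends(vc), starts(vc), ptr +% 1.U)
  io.empty  := VecInit((heads zip tails).map { case (h, t) => h === t })
  io.wrAddr := tails(io.push.bits)
  // Flow control elsewhere (credits / queue readiness) is assumed to keep a slice from
  // overflowing, so head === tail is treated purely as "empty".
  when (io.push.valid) { tails(io.push.bits) := wrapIncr(tails(io.push.bits), io.push.bits) }
  when (io.pop.valid)  { heads(io.pop.bits)  := wrapIncr(heads(io.pop.bits),  io.pop.bits) }
}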
Generate the Verilog code corresponding to the following Chisel files.
File Transposer.scala:
package gemmini
import chisel3._
import chisel3.util._
import Util._
trait Transposer[T <: Data] extends Module {
def dim: Int
def dataType: T
val io = IO(new Bundle {
val inRow = Flipped(Decoupled(Vec(dim, dataType)))
val outCol = Decoupled(Vec(dim, dataType))
})
}
class PipelinedTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
require(isPow2(dim))
val regArray = Seq.fill(dim, dim)(Reg(dataType))
val regArrayT = regArray.transpose
val sMoveUp :: sMoveLeft :: Nil = Enum(2)
val state = RegInit(sMoveUp)
  val leftCounter = RegInit(0.U(log2Ceil(dim+1).W)) //Counter(io.inRow.fire && state === sMoveLeft, dim+1)
val upCounter = RegInit(0.U(log2Ceil(dim+1).W)) //Counter(io.inRow.fire && state === sMoveUp, dim+1)
io.outCol.valid := 0.U
io.inRow.ready := 0.U
switch(state) {
is(sMoveUp) {
io.inRow.ready := upCounter <= dim.U
io.outCol.valid := leftCounter > 0.U
when(io.inRow.fire) {
upCounter := upCounter + 1.U
}
when(upCounter === (dim-1).U) {
state := sMoveLeft
leftCounter := 0.U
}
when(io.outCol.fire) {
leftCounter := leftCounter - 1.U
}
}
is(sMoveLeft) {
io.inRow.ready := leftCounter <= dim.U // TODO: this is naive
io.outCol.valid := upCounter > 0.U
when(leftCounter === (dim-1).U) {
state := sMoveUp
}
when(io.inRow.fire) {
leftCounter := leftCounter + 1.U
upCounter := 0.U
}
when(io.outCol.fire) {
upCounter := upCounter - 1.U
}
}
}
// Propagate input from bottom row to top row systolically in the move up phase
// TODO: need to iterate over columns to connect Chisel values of type T
// Should be able to operate directly on the Vec, but Seq and Vec don't mix (try Array?)
for (colIdx <- 0 until dim) {
regArray.foldRight(io.inRow.bits(colIdx)) {
case (regRow, prevReg) =>
when (state === sMoveUp) {
regRow(colIdx) := prevReg
}
regRow(colIdx)
}
}
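  // Worked example of the fold above for dim = 2 (illustration only): foldRight visits the
  // rows bottom-up with io.inRow.bits(colIdx) as the seed, so in the sMoveUp phase each
  // column elaborates to the shift chain
  //   regArray(1)(colIdx) := io.inRow.bits(colIdx)
  //   regArray(0)(colIdx) := regArray(1)(colIdx)
  // i.e. new rows enter at the bottom and previously loaded rows move one step toward row 0.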
// Propagate input from right side to left side systolically in the move left phase
for (rowIdx <- 0 until dim) {
regArrayT.foldRight(io.inRow.bits(rowIdx)) {
case (regCol, prevReg) =>
when (state === sMoveLeft) {
regCol(rowIdx) := prevReg
}
regCol(rowIdx)
}
}
// Pull from the left side or the top side based on the state
for (idx <- 0 until dim) {
when (state === sMoveUp) {
io.outCol.bits(idx) := regArray(0)(idx)
}.elsewhen(state === sMoveLeft) {
io.outCol.bits(idx) := regArrayT(0)(idx)
}.otherwise {
io.outCol.bits(idx) := DontCare
}
}
}
class AlwaysOutTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
require(isPow2(dim))
val LEFT_DIR = 0.U(1.W)
val UP_DIR = 1.U(1.W)
class PE extends Module {
val io = IO(new Bundle {
val inR = Input(dataType)
val inD = Input(dataType)
val outL = Output(dataType)
val outU = Output(dataType)
val dir = Input(UInt(1.W))
val en = Input(Bool())
})
val reg = RegEnable(Mux(io.dir === LEFT_DIR, io.inR, io.inD), io.en)
io.outU := reg
io.outL := reg
}
val pes = Seq.fill(dim,dim)(Module(new PE))
val counter = RegInit(0.U((log2Ceil(dim) max 1).W)) // TODO replace this with a standard Chisel counter
val dir = RegInit(LEFT_DIR)
// Wire up horizontal signals
for (row <- 0 until dim; col <- 0 until dim) {
val right_in = if (col == dim-1) io.inRow.bits(row) else pes(row)(col+1).io.outL
pes(row)(col).io.inR := right_in
}
// Wire up vertical signals
for (row <- 0 until dim; col <- 0 until dim) {
val down_in = if (row == dim-1) io.inRow.bits(col) else pes(row+1)(col).io.outU
pes(row)(col).io.inD := down_in
}
// Wire up global signals
pes.flatten.foreach(_.io.dir := dir)
pes.flatten.foreach(_.io.en := io.inRow.fire)
io.outCol.valid := true.B
io.inRow.ready := true.B
val left_out = VecInit(pes.transpose.head.map(_.io.outL))
val up_out = VecInit(pes.head.map(_.io.outU))
io.outCol.bits := Mux(dir === LEFT_DIR, left_out, up_out)
when (io.inRow.fire) {
counter := wrappingAdd(counter, 1.U, dim)
}
when (counter === (dim-1).U && io.inRow.fire) {
dir := ~dir
}
}
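// Worked dim = 2 trace of the AlwaysOutTransposer (illustration only): with dir = LEFT_DIR,
// rows A = [a0 a1] and then B = [b0 b1] enter at column dim-1 and shift left, leaving
// pes(r)(0) = a_r and pes(r)(1) = b_r. The second accepted row wraps the counter, so dir flips
// to UP_DIR: io.outCol then reads the top row, [a0 b0] (the first column of the input), and each
// further io.inRow.fire shifts the array upward, producing [a1 b1] next while the following
// input rows stream in from the bottom.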
class NaiveTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
val regArray = Seq.fill(dim, dim)(Reg(dataType))
val regArrayT = regArray.transpose
// state = 0 => filling regArray row-wise, state = 1 => draining regArray column-wise
val state = RegInit(0.U(1.W))
val countInc = io.inRow.fire || io.outCol.fire
val (countValue, countWrap) = Counter(countInc, dim)
io.inRow.ready := state === 0.U
io.outCol.valid := state === 1.U
for (i <- 0 until dim) {
for (j <- 0 until dim) {
when(countValue === i.U && io.inRow.fire) {
regArray(i)(j) := io.inRow.bits(j)
}
}
}
for (i <- 0 until dim) {
io.outCol.bits(i) := 0.U
for (j <- 0 until dim) {
when(countValue === j.U) {
io.outCol.bits(i) := regArrayT(j)(i)
}
}
}
when (io.inRow.fire && countWrap) {
state := 1.U
}
when (io.outCol.fire && countWrap) {
state := 0.U
}
assert(!(state === 0.U) || !io.outCol.fire)
assert(!(state === 1.U) || !io.inRow.fire)
}
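// Worked comparison (illustration, assuming a beat every cycle): the NaiveTransposer is
// half-duplex, accepting dim rows while state === 0.U and draining dim columns while
// state === 1.U, so for dim = 4 a full transpose takes 4 input beats plus 4 output beats,
// i.e. one row per two cycles, whereas the AlwaysOutTransposer above overlaps loading and
// draining.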
| module PE_145( // @[Transposer.scala:100:9]
input clock, // @[Transposer.scala:100:9]
input reset, // @[Transposer.scala:100:9]
input [7:0] io_inR, // @[Transposer.scala:101:16]
input [7:0] io_inD, // @[Transposer.scala:101:16]
output [7:0] io_outL, // @[Transposer.scala:101:16]
output [7:0] io_outU, // @[Transposer.scala:101:16]
input io_dir, // @[Transposer.scala:101:16]
input io_en // @[Transposer.scala:101:16]
);
wire [7:0] io_inR_0 = io_inR; // @[Transposer.scala:100:9]
wire [7:0] io_inD_0 = io_inD; // @[Transposer.scala:100:9]
wire io_dir_0 = io_dir; // @[Transposer.scala:100:9]
wire io_en_0 = io_en; // @[Transposer.scala:100:9]
wire [7:0] io_outL_0; // @[Transposer.scala:100:9]
wire [7:0] io_outU_0; // @[Transposer.scala:100:9]
wire _reg_T = ~io_dir_0; // @[Transposer.scala:100:9, :110:36]
wire [7:0] _reg_T_1 = _reg_T ? io_inR_0 : io_inD_0; // @[Transposer.scala:100:9, :110:{28,36}]
reg [7:0] reg_0; // @[Transposer.scala:110:24]
assign io_outL_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
assign io_outU_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
always @(posedge clock) begin // @[Transposer.scala:100:9]
if (io_en_0) // @[Transposer.scala:100:9]
reg_0 <= _reg_T_1; // @[Transposer.scala:110:{24,28}]
  end // always @(posedge)
assign io_outL = io_outL_0; // @[Transposer.scala:100:9]
assign io_outU = io_outU_0; // @[Transposer.scala:100:9]
endmodule |
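For context, a minimal usage sketch of the Chisel above (wrapper name, dim = 4 and the 8-bit element width are assumed; it presumes the Transposer definitions are in scope, e.g. in the same gemmini package). The PE_145 module above is one of the dim*dim processing elements such an instance elaborates:
import chisel3._
import chisel3.util._
class TransposeWrapper extends Module {
  val io = IO(new Bundle {
    val in  = Flipped(Decoupled(Vec(4, UInt(8.W))))
    val out = Decoupled(Vec(4, UInt(8.W)))
  })
  // Feed rows in, read transposed columns out; this transposer always asserts inRow.ready and
  // outCol.valid, so back-pressure is effectively ignored here.
  val t = Module(new AlwaysOutTransposer(4, UInt(8.W)))
  t.io.inRow <> io.in
  io.out <> t.io.outCol
}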
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
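  // Illustration: with the default monitorDir = MonitorDirection.Monitor both helpers reduce to
  // plain assertions; for any other direction, monAssert emits Property(monitorDir, ...) while
  // assume emits Property(monitorDir.flip, ...), so the same condition is checked on one side of
  // the edge and taken as a constraint (assumption) on the other during formal verification.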
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
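  // Worked example of visible() (client parameters assumed for illustration): for a client whose
  // sourceId covers 4 to 7 and whose visibility only contains 0x80000000-0xFFFFFFFF, a request
  // with source 5 and address 0x1000 makes that client's clause false, so the AND-reduction and
  // the monAsserts that use it fail; sources outside 4 to 7 leave the clause vacuously true for
  // that client.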
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
  //The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
  assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
  monAssert (b.bits.address === address, "'B' channel address changed with multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
  // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
  // This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
    val a_size_bus_size = edge.bundle.sizeBits + 1 // one extra bit per entry so that an all-zero entry means "unset"
    val a_opcode_bus_size = 3 + 1 // opcode is 3 bits; add one so that an all-zero entry means "unset"
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
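    // Worked example (editor's note, not in the original source): with sizeBits = 3,
    // a_size_bus_size = 4 and log_a_size_bus_size = 2, so each source ID owns a 4-bit slot of
    // inflight_sizes at bits [4*source+3 : 4*source]. An accepted 'A' request writes that slot
    // with Cat(a.size, 1.U); the low bit marks the slot as occupied, so an all-zero slot always
    // means "nothing outstanding". The lookups below shift the slot down and drop that bit again.
    // inflight_opcodes packs opcodes per source in exactly the same way.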
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
    val c_size_bus_size = edge.bundle.sizeBits + 1 // one extra bit per entry so that an all-zero entry means "unset"
    val c_opcode_bus_size = 3 + 1 // opcode is 3 bits; add one so that an all-zero entry means "unset"
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
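// Illustrative sketch (editor's addition, not part of the original file): DecoupledHelper
// gathers the ready/valid terms of a multi-way handshake so each side can compute "fire"
// from every term except its own. The signals below (in, out, buf) are hypothetical.
//   val helper = DecoupledHelper(in.valid, out.ready, buf.io.enq.ready)
//   out.valid        := helper.fire(out.ready)          // all terms except out.ready
//   in.ready         := helper.fire(in.valid)           // all terms except in.valid
//   buf.io.enq.valid := helper.fire(buf.io.enq.ready)   // all terms except its own ready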
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
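// Illustrative sketch (editor's addition): selecting a pair of values by key; the command
// constants below are made up for the example.
//   val (wen, wdata) = MuxTLookup(cmd, (false.B, 0.U), Seq(
//     CMD_SET -> (true.B, 1.U),
//     CMD_CLR -> (true.B, 0.U)))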
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
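// Illustrative note (editor's addition): ValidMux(a, b) is valid when either input is valid
// and forwards the bits of the first valid input, so earlier arguments win ties.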
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
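// Illustrative note (editor's addition): Str packs ASCII characters into a UInt, e.g.
//   Str('a')  // 0x61.U(8.W)
//   Str("ab") // 0x6162.U(16.W)
// Str(x) and Str(x, radix) render a hardware UInt as ASCII digits for debug printing.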
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
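// Illustrative note (editor's addition): Random(6) yields an approximately uniform value in
// [0, 6) driven by a 16-bit LFSR; Random.oneHot(6) produces a one-hot encoded selection instead.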
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
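// Illustrative note (editor's addition): Majority is a strict-majority vote, e.g.
// Majority(Seq(a, b, c)) is true when at least two of the three inputs are true, and
// Majority("b1011".U) is true (three of four bits set).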
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg (mask written MSB-first, bit i covering byte lane i): (0x3, 0, 4) => 1000, (0x3, 1, 4) => 1100, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
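// Illustrative note (editor's addition): in TileLink this is how byte-lane masks are derived;
// TLEdge.mask(address, lgSize) in Edges.scala simply calls MaskGen(address, lgSize, manager.beatBytes).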
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
  * initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
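/** Illustrative sketch (editor's addition, not part of the original file): a minimal module
  * that reads a plusarg-controlled threshold and arms a timeout. The plusarg names are made
  * up; run the simulator with +example_threshold=N or +example_timeout=N to override them.
  */
class PlusArgExample extends Module {
  val io = IO(new Bundle {
    val cycle = Input(UInt(32.W))
    val threshold = Output(UInt(32.W))
  })
  // +example_threshold=N on the simulator command line overrides the default of 16
  io.threshold := PlusArg("example_threshold", default = 16, docstring = "illustrative threshold")
  // Kills simulation once io.cycle reaches +example_timeout; the default of 0 disables the check
  PlusArg.timeout("example_timeout", docstring = "illustrative timeout")(io.cycle)
}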
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
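  // Illustrative notes (editor's addition) on a few of the helpers above, assuming a 4-bit value:
  //   "b0011".U(4.W).rotateRight(1)  // => b1001
  //   "b0011".U(4.W).rotateLeft(1)   // => b0110
  //   5.U.addWrap(3.U, 6)            // => 2.U, i.e. (5 + 3) % 6
  //   1.U.subWrap(3.U, 6)            // => 4.U, i.e. (1 - 3) % 6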
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
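  // Illustrative note (editor's addition): for x = "b00100".U(5.W),
  //   leftOR(x)  yields b11100  (set bits smeared toward the MSB)
  //   rightOR(x) yields b00111  (set bits smeared toward the LSB)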
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
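  // Illustrative note (editor's addition):
  //   groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2)  // => Seq(1 -> List(1, 3), 0 -> List(2, 4))
  // Keys appear in first-encountered order, which keeps generated hardware deterministic.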
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
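// Illustrative note (editor's addition): adResponse maps an A-channel opcode to its expected
// D-channel response, e.g. adResponse(TLMessages.Get) is AccessAckData and
// adResponse(TLMessages.PutFullData) is AccessAck; the monitor's responseMap mirrors this table.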
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
 *   (B)ranch: the agent is on an outwards path to a leaf and may hold only a read-only (shared) copy.
 *   (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
  // Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
  def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
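// Illustrative note (editor's addition): a client with no copy that intends to write sends
// AcquireBlock with grow permission NtoT and receives a Grant capped toT; when it later evicts
// the dirty block it sends ReleaseData with shrink permission TtoN and receives a ReleaseAck.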
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
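  // Number of beats minus one, in a form convenient for down-counters.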
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
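  // Per-channel beat counter: returns (first, last, done, count) for the message
  // currently being transferred on this channel.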
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
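  // Example (sketch, not part of the original source): tracking message boundaries on
  // the D channel of some bundle `in`, where `edge` names this TLEdge instance:
  //   val (d_first, d_last, d_done) = edge.firstlast(in.d)
  //   when (d_done) { /* the final beat of a D message was accepted this cycle */ }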
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
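  // Example (sketch, not part of the original source): bounding outstanding transactions
  // on a monitored bundle `tl`, with `limit` an assumed elaboration-time constant:
  //   val (flight, _) = edge.inFlight(tl)
  //   assert(flight <= limit.U, "too many outstanding TileLink transactions")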
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
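// Client-side view of an edge: builders for the A- and C-channel messages (plus the
// E-channel GrantAck) that a master may legally emit, returning a legality check
// alongside the bundle where one applies.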
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
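  // Example (sketch, not part of the original source): driving a read request, assuming
  // `out` is a DecoupledIO[TLBundleA] on this edge, `addr` is an aligned address, and
  // `wantRead` is the request condition:
  //   val (legal, get) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 2.U)
  //   out.valid := wantRead && legal
  //   out.bits  := get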
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
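// Manager-side view of an edge: builders for the B- and D-channel messages that a
// slave may legally emit toward, or in reply to, its clients.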
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
module TLMonitor_36( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [1:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input io_in_a_bits_source, // @[Monitor.scala:20:14]
input [8:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input io_in_d_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_d_bits_data // @[Monitor.scala:20:14]
);
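  // This edge exposes only the A and D channels, so the B/C/E bookkeeping that follows
  // is constant-folded: the c_* / probe-ack / same-cycle-response wires reduce to constants.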
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [1:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [8:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [3:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [31:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [31:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire a_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire a_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire a_first_count = 1'h0; // @[Edges.scala:234:25]
wire d_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count = 1'h0; // @[Edges.scala:234:25]
wire a_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59]
wire a_first_beats1_1 = 1'h0; // @[Edges.scala:221:14]
wire a_first_count_1 = 1'h0; // @[Edges.scala:234:25]
wire d_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1_1 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count_1 = 1'h0; // @[Edges.scala:234:25]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire c_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_first_count_T = 1'h0; // @[Edges.scala:234:27]
wire c_first_count = 1'h0; // @[Edges.scala:234:25]
wire _c_first_counter_T = 1'h0; // @[Edges.scala:236:21]
wire d_first_beats1_decode_2 = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1_2 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count_2 = 1'h0; // @[Edges.scala:234:25]
wire c_set = 1'h0; // @[Monitor.scala:738:34]
wire c_set_wo_ready = 1'h0; // @[Monitor.scala:739:34]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire _a_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire a_first_last = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last = 1'h1; // @[Edges.scala:232:33]
wire _a_first_last_T_3 = 1'h1; // @[Edges.scala:232:43]
wire a_first_last_1 = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_3 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last_1 = 1'h1; // @[Edges.scala:232:33]
wire c_first_counter1 = 1'h1; // @[Edges.scala:230:28]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_5 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last_2 = 1'h1; // @[Edges.scala:232:33]
wire [1:0] _c_first_beats1_decode_T_1 = 2'h3; // @[package.scala:243:76]
wire [1:0] _c_first_counter1_T = 2'h3; // @[Edges.scala:230:28]
wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7]
wire [1:0] _c_first_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_first_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_first_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_first_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_first_beats1_decode_T_2 = 2'h0; // @[package.scala:243:46]
wire [1:0] _c_set_wo_ready_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_set_wo_ready_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_opcodes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_opcodes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_sizes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_sizes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_opcodes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_opcodes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_sizes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_sizes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_probe_ack_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_probe_ack_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_probe_ack_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_probe_ack_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_4_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_5_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_first_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_first_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_first_WIRE_2_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_first_WIRE_3_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_set_wo_ready_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_set_wo_ready_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_set_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_set_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_opcodes_set_interm_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_opcodes_set_interm_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_sizes_set_interm_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_sizes_set_interm_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_opcodes_set_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_opcodes_set_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_sizes_set_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_sizes_set_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_probe_ack_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_probe_ack_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_probe_ack_WIRE_2_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_probe_ack_WIRE_3_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_2_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_3_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_4_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_5_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_first_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_first_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_first_WIRE_2_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_first_WIRE_3_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_set_wo_ready_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_set_wo_ready_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_set_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_set_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_opcodes_set_interm_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_opcodes_set_interm_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_sizes_set_interm_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_sizes_set_interm_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_opcodes_set_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_opcodes_set_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_sizes_set_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_sizes_set_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_probe_ack_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_probe_ack_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_probe_ack_WIRE_2_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_probe_ack_WIRE_3_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _same_cycle_resp_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _same_cycle_resp_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _same_cycle_resp_WIRE_2_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _same_cycle_resp_WIRE_3_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _same_cycle_resp_WIRE_4_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _same_cycle_resp_WIRE_5_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_sizes_set_interm = 3'h0; // @[Monitor.scala:755:40]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_T = 3'h0; // @[Monitor.scala:766:51]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [17:0] _c_sizes_set_T_1 = 18'h0; // @[Monitor.scala:768:52]
wire [3:0] c_opcodes_set = 4'h0; // @[Monitor.scala:740:34]
wire [3:0] c_sizes_set = 4'h0; // @[Monitor.scala:741:34]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_opcodes_set_T = 4'h0; // @[Monitor.scala:767:79]
wire [3:0] _c_sizes_set_T = 4'h0; // @[Monitor.scala:768:77]
wire [18:0] _c_opcodes_set_T_1 = 19'h0; // @[Monitor.scala:767:54]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] _c_sizes_set_interm_T_1 = 3'h1; // @[Monitor.scala:766:59]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [1:0] _c_set_wo_ready_T = 2'h1; // @[OneHot.scala:58:35]
wire [1:0] _c_set_T = 2'h1; // @[OneHot.scala:58:35]
wire [4:0] _c_first_beats1_decode_T = 5'h3; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [1:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire _source_ok_T = ~io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [4:0] _GEN = 5'h3 << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [4:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [4:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [4:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [1:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[1:0]; // @[package.scala:243:{71,76}]
wire [1:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [8:0] _is_aligned_T = {7'h0, io_in_a_bits_address_0[1:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 9'h0; // @[Edges.scala:21:{16,24}]
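  // Byte-lane mask generation (Misc.scala): expand the request size and low address bits
  // into the 4-bit mask expected on the 32-bit data bus.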
wire mask_sizeOH_shiftAmount = _mask_sizeOH_T[0]; // @[OneHot.scala:64:49]
wire [1:0] _mask_sizeOH_T_1 = 2'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [1:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1; // @[OneHot.scala:65:{12,27}]
wire [1:0] mask_sizeOH = {_mask_sizeOH_T_2[1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_0_1 = io_in_a_bits_size_0[1]; // @[Misc.scala:206:21]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_1_2 = mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire _source_ok_T_1 = ~io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_1; // @[Parameters.scala:1138:31]
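  // A-channel handshake and beat tracking (Edges.scala firstlastHelper).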
wire _T_898 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_898; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_898; // @[Decoupled.scala:51:35]
wire a_first_done = _a_first_T; // @[Decoupled.scala:51:35]
wire [1:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[1:0]; // @[package.scala:243:{71,76}]
wire [1:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
reg a_first_counter; // @[Edges.scala:229:27]
wire _a_first_last_T = a_first_counter; // @[Edges.scala:229:27, :232:25]
wire [1:0] _a_first_counter1_T = {1'h0, a_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire a_first_counter1 = _a_first_counter1_T[0]; // @[Edges.scala:230:28]
wire a_first = ~a_first_counter; // @[Edges.scala:229:27, :231:25]
wire _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire _a_first_counter_T = ~a_first & a_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21]
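  // Fields of the most recent A-channel request, saved so later checks can compare the
  // matching D response against them (Monitor.scala:387-391).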
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [1:0] size; // @[Monitor.scala:389:22]
reg source; // @[Monitor.scala:390:22]
reg [8:0] address; // @[Monitor.scala:391:22]
wire _T_966 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_966; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_966; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_966; // @[Decoupled.scala:51:35]
wire d_first_done = _d_first_T; // @[Decoupled.scala:51:35]
wire [4:0] _GEN_0 = 5'h3 << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [4:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [4:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [4:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [1:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[1:0]; // @[package.scala:243:{71,76}]
wire [1:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
reg d_first_counter; // @[Edges.scala:229:27]
wire _d_first_last_T = d_first_counter; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T = {1'h0, d_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1 = _d_first_counter1_T[0]; // @[Edges.scala:230:28]
wire d_first = ~d_first_counter; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T = ~d_first & d_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] size_1; // @[Monitor.scala:540:22]
reg source_1; // @[Monitor.scala:541:22]
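  // Outstanding-transaction tracking: `inflight` flags which source IDs have an open A
  // request, while `inflight_opcodes` / `inflight_sizes` record the opcode and size used
  // to validate the eventual D response (Monitor.scala:614-618).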
reg [1:0] inflight; // @[Monitor.scala:614:27]
reg [3:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [3:0] inflight_sizes; // @[Monitor.scala:618:33]
wire a_first_done_1 = _a_first_T_1; // @[Decoupled.scala:51:35]
wire [1:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[1:0]; // @[package.scala:243:{71,76}]
wire [1:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
reg a_first_counter_1; // @[Edges.scala:229:27]
wire _a_first_last_T_2 = a_first_counter_1; // @[Edges.scala:229:27, :232:25]
wire [1:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire a_first_counter1_1 = _a_first_counter1_T_1[0]; // @[Edges.scala:230:28]
wire a_first_1 = ~a_first_counter_1; // @[Edges.scala:229:27, :231:25]
wire _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire _a_first_counter_T_1 = ~a_first_1 & a_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21]
wire d_first_done_1 = _d_first_T_1; // @[Decoupled.scala:51:35]
wire [1:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[1:0]; // @[package.scala:243:{71,76}]
wire [1:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
reg d_first_counter_1; // @[Edges.scala:229:27]
wire _d_first_last_T_2 = d_first_counter_1; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1_1 = _d_first_counter1_T_1[0]; // @[Edges.scala:230:28]
wire d_first_1 = ~d_first_counter_1; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T_1 = ~d_first_1 & d_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21]
wire a_set; // @[Monitor.scala:626:34]
wire a_set_wo_ready; // @[Monitor.scala:627:34]
wire [3:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [3:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [3:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [3:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [3:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [3:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [3:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [3:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [3:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [3:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [3:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [3:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [15:0] _a_opcode_lookup_T_6 = {12'h0, _a_opcode_lookup_T_1}; // @[Monitor.scala:637:{44,97}]
wire [15:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [3:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [15:0] _a_size_lookup_T_6 = {12'h0, _a_size_lookup_T_1}; // @[Monitor.scala:637:97, :641:{40,91}]
wire [15:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[15:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [2:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [1:0] _GEN_2 = {1'h0, io_in_a_bits_source_0}; // @[OneHot.scala:58:35]
wire [1:0] _GEN_3 = 2'h1 << _GEN_2; // @[OneHot.scala:58:35]
wire [1:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_3; // @[OneHot.scala:58:35]
wire [1:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_3; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T & _a_set_wo_ready_T[0]; // @[OneHot.scala:58:35]
wire _T_831 = _T_898 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_831 & _a_set_T[0]; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_831 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [2:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [2:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[2:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_831 ? _a_sizes_set_interm_T_1 : 3'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [3:0] _GEN_4 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [3:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_4; // @[Monitor.scala:659:79]
wire [3:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_4; // @[Monitor.scala:659:79, :660:77]
wire [18:0] _a_opcodes_set_T_1 = {15'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_831 ? _a_opcodes_set_T_1[3:0] : 4'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [17:0] _a_sizes_set_T_1 = {15'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_831 ? _a_sizes_set_T_1[3:0] : 4'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire d_clr; // @[Monitor.scala:664:34]
wire d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [3:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [3:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_5 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_5; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_5; // @[Monitor.scala:673:46, :783:46]
wire _T_877 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [1:0] _GEN_6 = {1'h0, io_in_d_bits_source_0}; // @[OneHot.scala:58:35]
wire [1:0] _GEN_7 = 2'h1 << _GEN_6; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_7; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_7; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_7; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_7; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_877 & ~d_release_ack & _d_clr_wo_ready_T[0]; // @[OneHot.scala:58:35]
wire _T_846 = _T_966 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_846 & _d_clr_T[0]; // @[OneHot.scala:58:35]
wire [30:0] _d_opcodes_clr_T_5 = 31'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_846 ? _d_opcodes_clr_T_5[3:0] : 4'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [30:0] _d_sizes_clr_T_5 = 31'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_846 ? _d_sizes_clr_T_5[3:0] : 4'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [1:0] _inflight_T = {inflight[1], inflight[0] | a_set}; // @[Monitor.scala:614:27, :626:34, :705:27]
wire _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [1:0] _inflight_T_2 = {1'h0, _inflight_T[0] & _inflight_T_1}; // @[Monitor.scala:705:{27,36,38}]
wire [3:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [3:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [3:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [3:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [3:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [3:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [1:0] inflight_1; // @[Monitor.scala:726:35]
wire [1:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [3:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [3:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [3:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [3:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire d_first_done_2 = _d_first_T_2; // @[Decoupled.scala:51:35]
wire [1:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[1:0]; // @[package.scala:243:{71,76}]
wire [1:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
reg d_first_counter_2; // @[Edges.scala:229:27]
wire _d_first_last_T_4 = d_first_counter_2; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1_2 = _d_first_counter1_T_2[0]; // @[Edges.scala:230:28]
wire d_first_2 = ~d_first_counter_2; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T_2 = ~d_first_2 & d_first_counter1_2; // @[Edges.scala:230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [3:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [15:0] _c_opcode_lookup_T_6 = {12'h0, _c_opcode_lookup_T_1}; // @[Monitor.scala:637:97, :749:{44,97}]
wire [15:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [3:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [15:0] _c_size_lookup_T_6 = {12'h0, _c_size_lookup_T_1}; // @[Monitor.scala:637:97, :750:{42,93}]
wire [15:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[15:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire d_clr_1; // @[Monitor.scala:774:34]
wire d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [3:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [3:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_942 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_942 & d_release_ack_1 & _d_clr_wo_ready_T_1[0]; // @[OneHot.scala:58:35]
wire _T_924 = _T_966 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_924 & _d_clr_T_1[0]; // @[OneHot.scala:58:35]
wire [30:0] _d_opcodes_clr_T_11 = 31'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_924 ? _d_opcodes_clr_T_11[3:0] : 4'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [30:0] _d_sizes_clr_T_11 = 31'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_924 ? _d_sizes_clr_T_11[3:0] : 4'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = ~io_in_d_bits_source_0; // @[Monitor.scala:36:7, :795:113]
wire _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [1:0] _inflight_T_5 = {1'h0, _inflight_T_3[0] & _inflight_T_4}; // @[Monitor.scala:814:{35,44,46}]
wire [3:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [3:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [3:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [3:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
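
// Usage sketch added for illustration (not part of the upstream file); the depth, reset value
// and register name below are arbitrary. ShiftRegInit builds an n-deep delay line whose
// registers reset to `init` and are named `name_0 .. name_{n-1}` when a name is supplied.
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  // Two-cycle delay; after reset the output stays false for the first two cycles.
  io.q := ShiftRegInit(io.d, n = 2, init = false.B, name = Some("delay"))
}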
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
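
// Usage sketch added for illustration (not part of the upstream file); the signal names are
// made up. This is the common single-bit CDC pattern: a 3-deep, asynchronously reset
// synchronizer chain between an external level signal and the local clock domain.
class BitSyncExample extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())
    val sync_out = Output(Bool())
  })
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, sync = 3, init = 0, name = Some("bit_sync"))
}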
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
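
// Usage sketch added for illustration (not part of the upstream file); names are arbitrary.
// ClockCrossingReg is a single capture register for multi-bit buses that are already stable
// when `en` is asserted (e.g. gray-coded pointers or handshaked data).
class ClockCrossingRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(8.W))
    val en = Input(Bool())
    val q = Output(UInt(8.W))
  })
  io.q := ClockCrossingReg(io.d, en = io.en, doInit = true, name = Some("cdc_capture"))
}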
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_82( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_166 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File TLChannelCompactor.scala:
package testchipip.serdes
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.util._
import freechips.rocketchip.tilelink._
trait TLFieldHelper {
def getBodyFields(b: TLChannel): Seq[Data] = b match {
case b: TLBundleA => Seq(b.mask, b.data, b.corrupt)
case b: TLBundleB => Seq(b.mask, b.data, b.corrupt)
case b: TLBundleC => Seq( b.data, b.corrupt)
case b: TLBundleD => Seq( b.data, b.corrupt)
case b: TLBundleE => Seq()
}
def getConstFields(b: TLChannel): Seq[Data] = b match {
case b: TLBundleA => Seq(b.opcode, b.param, b.size, b.source, b.address, b.user, b.echo )
case b: TLBundleB => Seq(b.opcode, b.param, b.size, b.source, b.address )
case b: TLBundleC => Seq(b.opcode, b.param, b.size, b.source, b.address, b.user, b.echo )
case b: TLBundleD => Seq(b.opcode, b.param, b.size, b.source, b.user, b.echo, b.sink, b.denied)
case b: TLBundleE => Seq( b.sink )
}
def minTLPayloadWidth(b: TLChannel): Int = Seq(getBodyFields(b), getConstFields(b)).map(_.map(_.getWidth).sum).max
def minTLPayloadWidth(bs: Seq[TLChannel]): Int = bs.map(b => minTLPayloadWidth(b)).max
def minTLPayloadWidth(b: TLBundle): Int = minTLPayloadWidth(Seq(b.a, b.b, b.c, b.d, b.e).map(_.bits))
}
class TLBeat(val beatWidth: Int) extends Bundle {
val payload = UInt(beatWidth.W)
val head = Bool()
val tail = Bool()
}
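
// Framing note (comment added for clarity): a TL message is serialized as one head beat whose
// payload holds the constant header fields, followed by one body beat per data beat of the
// underlying message carrying the mask/data/corrupt fields (for the channels that have them);
// `tail` marks the final beat, and a message with no body collapses to a single beat with both
// `head` and `tail` set.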
abstract class TLChannelToBeat[T <: TLChannel](gen: => T, edge: TLEdge, nameSuffix: Option[String])(implicit val p: Parameters) extends Module with TLFieldHelper {
override def desiredName = (Seq(this.getClass.getSimpleName) ++ nameSuffix ++ Seq(gen.params.shortName)).mkString("_")
val beatWidth = minTLPayloadWidth(gen)
val io = IO(new Bundle {
val protocol = Flipped(Decoupled(gen))
val beat = Decoupled(new TLBeat(beatWidth))
})
def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B
// convert decoupled to irrevocable
val q = Module(new Queue(gen, 1, pipe=true, flow=true))
q.io.enq <> io.protocol
val protocol = q.io.deq
val has_body = Wire(Bool())
val body_fields = getBodyFields(protocol.bits)
val const_fields = getConstFields(protocol.bits)
val head = edge.first(protocol.bits, protocol.fire)
val tail = edge.last(protocol.bits, protocol.fire)
val body = Cat( body_fields.filter(_.getWidth > 0).map(_.asUInt))
val const = Cat(const_fields.filter(_.getWidth > 0).map(_.asUInt))
val is_body = RegInit(false.B)
io.beat.valid := protocol.valid
protocol.ready := io.beat.ready && (is_body || !has_body)
io.beat.bits.head := head && !is_body
io.beat.bits.tail := tail && (is_body || !has_body)
io.beat.bits.payload := Mux(is_body, body, const)
when (io.beat.fire && io.beat.bits.head) { is_body := true.B }
when (io.beat.fire && io.beat.bits.tail) { is_body := false.B }
}
abstract class TLChannelFromBeat[T <: TLChannel](gen: => T, nameSuffix: Option[String])(implicit val p: Parameters) extends Module with TLFieldHelper {
override def desiredName = (Seq(this.getClass.getSimpleName) ++ nameSuffix ++ Seq(gen.params.shortName)).mkString("_")
val beatWidth = minTLPayloadWidth(gen)
val io = IO(new Bundle {
val protocol = Decoupled(gen)
val beat = Flipped(Decoupled(new TLBeat(beatWidth)))
})
// Handle size = 1 gracefully (Chisel3 empty range is broken)
def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0)
val protocol = Wire(Decoupled(gen))
io.protocol <> protocol
val body_fields = getBodyFields(protocol.bits)
val const_fields = getConstFields(protocol.bits)
val is_const = RegInit(true.B)
val const_reg = Reg(UInt(const_fields.map(_.getWidth).sum.W))
val const = Mux(io.beat.bits.head, io.beat.bits.payload, const_reg)
io.beat.ready := (is_const && !io.beat.bits.tail) || protocol.ready
protocol.valid := (!is_const || io.beat.bits.tail) && io.beat.valid
def assign(i: UInt, sigs: Seq[Data]) = {
var t = i
for (s <- sigs.reverse) {
s := t.asTypeOf(s.cloneType)
t = t >> s.getWidth
}
}
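  // Note (comment added for clarity): `assign` is the inverse of the Cat(...) packing on the
  // ToBeat side -- it walks the field list from its last element upward, peeling each field off
  // the low end of the packed word, so the first field in the Seq ends up in the
  // most-significant bits, matching Cat's ordering.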
assign(const, const_fields)
assign(io.beat.bits.payload, body_fields)
when (io.beat.fire && io.beat.bits.head) { is_const := false.B; const_reg := io.beat.bits.payload }
when (io.beat.fire && io.beat.bits.tail) { is_const := true.B }
}
class TLAToBeat(edgeIn: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleA(bundle), edgeIn, nameSuffix)(p) {
has_body := edgeIn.hasData(protocol.bits) || (~protocol.bits.mask =/= 0.U)
}
class TLAFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleA(bundle), nameSuffix)(p) {
when (io.beat.bits.head) { io.protocol.bits.mask := ~(0.U(io.protocol.bits.mask.getWidth.W)) }
}
class TLBToBeat(edgeOut: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleB(bundle), edgeOut, nameSuffix)(p) {
has_body := edgeOut.hasData(protocol.bits) || (~protocol.bits.mask =/= 0.U)
}
class TLBFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleB(bundle), nameSuffix)(p) {
when (io.beat.bits.head) { io.protocol.bits.mask := ~(0.U(io.protocol.bits.mask.getWidth.W)) }
}
class TLCToBeat(edgeIn: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleC(bundle), edgeIn, nameSuffix)(p) {
has_body := edgeIn.hasData(protocol.bits)
}
class TLCFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleC(bundle), nameSuffix)(p)
class TLDToBeat(edgeOut: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleD(bundle), edgeOut, nameSuffix)(p) {
has_body := edgeOut.hasData(protocol.bits)
}
class TLDFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleD(bundle), nameSuffix)(p)
class TLEToBeat(edgeIn: TLEdge, bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelToBeat(new TLBundleE(bundle), edgeIn, nameSuffix)(p) {
has_body := edgeIn.hasData(protocol.bits)
}
class TLEFromBeat(bundle: TLBundleParameters, nameSuffix: Option[String])(implicit p: Parameters) extends TLChannelFromBeat(new TLBundleE(bundle), nameSuffix)(p)
| module TLEFromBeat_serial_tl_0_a64d64s8k8z8c( // @[TLChannelCompactor.scala:140:7]
input clock, // @[TLChannelCompactor.scala:140:7]
input reset, // @[TLChannelCompactor.scala:140:7]
output io_protocol_valid, // @[TLChannelCompactor.scala:75:14]
output [7:0] io_protocol_bits_sink, // @[TLChannelCompactor.scala:75:14]
output io_beat_ready, // @[TLChannelCompactor.scala:75:14]
input io_beat_valid, // @[TLChannelCompactor.scala:75:14]
input [7:0] io_beat_bits_payload, // @[TLChannelCompactor.scala:75:14]
input io_beat_bits_head, // @[TLChannelCompactor.scala:75:14]
input io_beat_bits_tail // @[TLChannelCompactor.scala:75:14]
);
wire io_beat_valid_0 = io_beat_valid; // @[TLChannelCompactor.scala:140:7]
wire [7:0] io_beat_bits_payload_0 = io_beat_bits_payload; // @[TLChannelCompactor.scala:140:7]
wire io_beat_bits_head_0 = io_beat_bits_head; // @[TLChannelCompactor.scala:140:7]
wire io_beat_bits_tail_0 = io_beat_bits_tail; // @[TLChannelCompactor.scala:140:7]
wire io_protocol_ready = 1'h0; // @[TLChannelCompactor.scala:140:7]
wire protocol_ready = 1'h0; // @[TLChannelCompactor.scala:83:22]
wire protocol_valid; // @[TLChannelCompactor.scala:83:22]
wire [7:0] protocol_bits_sink; // @[TLChannelCompactor.scala:83:22]
wire _io_beat_ready_T_2; // @[TLChannelCompactor.scala:91:53]
wire [7:0] io_protocol_bits_sink_0; // @[TLChannelCompactor.scala:140:7]
wire io_protocol_valid_0; // @[TLChannelCompactor.scala:140:7]
wire io_beat_ready_0; // @[TLChannelCompactor.scala:140:7]
wire _protocol_valid_T_2; // @[TLChannelCompactor.scala:92:54]
assign io_protocol_valid_0 = protocol_valid; // @[TLChannelCompactor.scala:83:22, :140:7]
wire [7:0] _protocol_bits_sink_WIRE; // @[TLChannelCompactor.scala:97:22]
assign io_protocol_bits_sink_0 = protocol_bits_sink; // @[TLChannelCompactor.scala:83:22, :140:7]
reg is_const; // @[TLChannelCompactor.scala:88:25]
reg [7:0] const_reg; // @[TLChannelCompactor.scala:89:22]
wire [7:0] const_0 = io_beat_bits_head_0 ? io_beat_bits_payload_0 : const_reg; // @[TLChannelCompactor.scala:89:22, :90:18, :140:7]
assign _protocol_bits_sink_WIRE = const_0; // @[TLChannelCompactor.scala:90:18, :97:22]
wire _io_beat_ready_T = ~io_beat_bits_tail_0; // @[TLChannelCompactor.scala:91:33, :140:7]
wire _io_beat_ready_T_1 = is_const & _io_beat_ready_T; // @[TLChannelCompactor.scala:88:25, :91:{30,33}]
assign _io_beat_ready_T_2 = _io_beat_ready_T_1; // @[TLChannelCompactor.scala:91:{30,53}]
assign io_beat_ready_0 = _io_beat_ready_T_2; // @[TLChannelCompactor.scala:91:53, :140:7]
wire _protocol_valid_T = ~is_const; // @[TLChannelCompactor.scala:88:25, :92:22]
wire _protocol_valid_T_1 = _protocol_valid_T | io_beat_bits_tail_0; // @[TLChannelCompactor.scala:92:{22,32}, :140:7]
assign _protocol_valid_T_2 = _protocol_valid_T_1 & io_beat_valid_0; // @[TLChannelCompactor.scala:92:{32,54}, :140:7]
assign protocol_valid = _protocol_valid_T_2; // @[TLChannelCompactor.scala:83:22, :92:54]
assign protocol_bits_sink = _protocol_bits_sink_WIRE; // @[TLChannelCompactor.scala:83:22, :97:22]
wire _T_3 = io_beat_ready_0 & io_beat_valid_0; // @[Decoupled.scala:51:35]
wire _T_2 = _T_3 & io_beat_bits_head_0; // @[Decoupled.scala:51:35]
always @(posedge clock) begin // @[TLChannelCompactor.scala:140:7]
if (reset) // @[TLChannelCompactor.scala:140:7]
is_const <= 1'h1; // @[TLChannelCompactor.scala:88:25]
else // @[TLChannelCompactor.scala:140:7]
is_const <= _T_3 & io_beat_bits_tail_0 | ~_T_2 & is_const; // @[Decoupled.scala:51:35]
if (_T_2) // @[TLChannelCompactor.scala:104:22]
const_reg <= io_beat_bits_payload_0; // @[TLChannelCompactor.scala:89:22, :140:7]
  end // always @(posedge)
assign io_protocol_valid = io_protocol_valid_0; // @[TLChannelCompactor.scala:140:7]
assign io_protocol_bits_sink = io_protocol_bits_sink_0; // @[TLChannelCompactor.scala:140:7]
assign io_beat_ready = io_beat_ready_0; // @[TLChannelCompactor.scala:140:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
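
// Illustrative configurations (added for this document, not part of the upstream file):
object AsyncQueueParamsExamples {
  // The default: 8 entries, 3 synchronizer flops, reset-safe handshake, full-width read mux.
  val wide = AsyncQueueParams()
  // A smaller crossing that moves the read mux to the source side, trading a combinational
  // sink->source path for fewer level shifters on a voltage-domain crossing.
  val narrowCrossing = AsyncQueueParams(depth = 4, narrow = true)
}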
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
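
// Worked example (added for illustration; the width and names are arbitrary). Successive binary
// counts 0,1,2,3,... map through `b ^ (b >> 1)` to Gray codes 000,001,011,010,110,... so exactly
// one bit changes per increment (including the wrap), which is what makes the pointer safe to
// sample through a flop synchronizer in the other clock domain.
class GrayCounterExample extends Module {
  val io = IO(new Bundle {
    val inc = Input(Bool())
    val gray = Output(UInt(3.W))
  })
  io.gray := GrayCounter(3, io.inc, false.B, "example_ptr_bin")
}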
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because dequeue can occur on the receiving side,
// then reset allowed to happen, but write side cannot know that dequeue
// occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected valued is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
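
// Composition note (comment added for clarity): the pieces above pair up as
//   producer domain:  val source = Module(new AsyncQueueSource(gen, params))   // or ToAsyncBundle
//   consumer domain:  val sink   = Module(new AsyncQueueSink(gen, params))     // or FromAsyncBundle
//   wiring:           sink.io.async <> source.io.async
// AsyncQueue itself does exactly this once both clock/reset pairs are supplied through its
// CrossingIO.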
| module AsyncValidSync_89( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_99 io_out_source_valid_1 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in_0), // @[AsyncQueue.scala:58:7]
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File InputUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{FlowRoutingBundle}
import constellation.noc.{HasNoCParams}
class AbstractInputUnitIO(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams],
)(implicit val p: Parameters) extends Bundle with HasRouterOutputParams {
val nodeId = cParam.destId
val router_req = Decoupled(new RouteComputerReq)
val router_resp = Input(new RouteComputerResp(outParams, egressParams))
val vcalloc_req = Decoupled(new VCAllocReq(cParam, outParams, egressParams))
val vcalloc_resp = Input(new VCAllocResp(outParams, egressParams))
val out_credit_available = Input(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) }))
val salloc_req = Vec(cParam.destSpeedup, Decoupled(new SwitchAllocReq(outParams, egressParams)))
val out = Vec(cParam.destSpeedup, Valid(new SwitchBundle(outParams, egressParams)))
val debug = Output(new Bundle {
val va_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
val sa_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
})
val block = Input(Bool())
}
abstract class AbstractInputUnit(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module with HasRouterOutputParams with HasNoCParams {
val nodeId = cParam.destId
def io: AbstractInputUnitIO
}
class InputBuffer(cParam: ChannelParams)(implicit p: Parameters) extends Module {
val nVirtualChannels = cParam.nVirtualChannels
val io = IO(new Bundle {
val enq = Flipped(Vec(cParam.srcSpeedup, Valid(new Flit(cParam.payloadBits))))
val deq = Vec(cParam.nVirtualChannels, Decoupled(new BaseFlit(cParam.payloadBits)))
})
val useOutputQueues = cParam.useOutputQueues
val delims = if (useOutputQueues) {
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize else 0).scanLeft(0)(_+_)
} else {
// If no queuing, have to add an additional slot since head == tail implies empty
// TODO this should be fixed, should use all slots available
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize + 1 else 0).scanLeft(0)(_+_)
}
val starts = delims.dropRight(1).zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val ends = delims.tail.zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val fullSize = delims.last
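  // Worked example (hypothetical parameters, added for clarity): with output queues enabled and
  // three traversable VCs of bufferSize 2, 3 and 2, delims = Seq(0, 2, 5, 7), so
  // starts = Seq(0, 2, 5), ends = Seq(2, 5, 7) and fullSize = 7 -- each VC owns a contiguous
  // slice of the single shared buffer below.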
// Ugly case. Use multiple queues
if ((cParam.srcSpeedup > 1 || cParam.destSpeedup > 1 || fullSize <= 1) || !cParam.unifiedBuffer) {
require(useOutputQueues)
val qs = cParam.virtualChannelParams.map(v => Module(new Queue(new BaseFlit(cParam.payloadBits), v.bufferSize)))
qs.zipWithIndex.foreach { case (q,i) =>
val sel = io.enq.map(f => f.valid && f.bits.virt_channel_id === i.U)
q.io.enq.valid := sel.orR
q.io.enq.bits.head := Mux1H(sel, io.enq.map(_.bits.head))
q.io.enq.bits.tail := Mux1H(sel, io.enq.map(_.bits.tail))
q.io.enq.bits.payload := Mux1H(sel, io.enq.map(_.bits.payload))
io.deq(i) <> q.io.deq
}
} else {
val mem = Mem(fullSize, new BaseFlit(cParam.payloadBits))
val heads = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val tails = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val empty = (heads zip tails).map(t => t._1 === t._2)
val qs = Seq.fill(nVirtualChannels) { Module(new Queue(new BaseFlit(cParam.payloadBits), 1, pipe=true)) }
qs.foreach(_.io.enq.valid := false.B)
qs.foreach(_.io.enq.bits := DontCare)
val vc_sel = UIntToOH(io.enq(0).bits.virt_channel_id)
val flit = Wire(new BaseFlit(cParam.payloadBits))
val direct_to_q = (Mux1H(vc_sel, qs.map(_.io.enq.ready)) && Mux1H(vc_sel, empty)) && useOutputQueues.B
flit.head := io.enq(0).bits.head
flit.tail := io.enq(0).bits.tail
flit.payload := io.enq(0).bits.payload
when (io.enq(0).valid && !direct_to_q) {
val tail = tails(io.enq(0).bits.virt_channel_id)
mem.write(tail, flit)
tails(io.enq(0).bits.virt_channel_id) := Mux(
tail === Mux1H(vc_sel, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(vc_sel, starts.map(_.U)),
tail + 1.U)
} .elsewhen (io.enq(0).valid && direct_to_q) {
for (i <- 0 until nVirtualChannels) {
when (io.enq(0).bits.virt_channel_id === i.U) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := flit
}
}
}
if (useOutputQueues) {
val can_to_q = (0 until nVirtualChannels).map { i => !empty(i) && qs(i).io.enq.ready }
val to_q_oh = PriorityEncoderOH(can_to_q)
val to_q = OHToUInt(to_q_oh)
when (can_to_q.orR) {
val head = Mux1H(to_q_oh, heads)
heads(to_q) := Mux(
head === Mux1H(to_q_oh, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(to_q_oh, starts.map(_.U)),
head + 1.U)
for (i <- 0 until nVirtualChannels) {
when (to_q_oh(i)) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := mem.read(head)
}
}
}
for (i <- 0 until nVirtualChannels) {
io.deq(i) <> qs(i).io.deq
}
} else {
qs.map(_.io.deq.ready := false.B)
val ready_sel = io.deq.map(_.ready)
val fire = io.deq.map(_.fire)
assert(PopCount(fire) <= 1.U)
val head = Mux1H(fire, heads)
when (fire.orR) {
val fire_idx = OHToUInt(fire)
heads(fire_idx) := Mux(
head === Mux1H(fire, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(fire, starts.map(_.U)),
head + 1.U)
}
val read_flit = mem.read(head)
for (i <- 0 until nVirtualChannels) {
io.deq(i).valid := !empty(i)
io.deq(i).bits := read_flit
}
}
}
}
class InputUnit(cParam: ChannelParams, outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean, combineSAST: Boolean
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
val nVirtualChannels = cParam.nVirtualChannels
val virtualChannelParams = cParam.virtualChannelParams
class InputUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(new Channel(cParam.asInstanceOf[ChannelParams]))
}
val io = IO(new InputUnitIO)
val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5)
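  // State meanings (comment added for clarity): g_i = idle, g_r = awaiting route computation,
  // g_v = awaiting VC allocation, g_a = active (competing for switch allocation); a VC returns
  // to g_i when its tail flit wins switch allocation. A head flit already at its egress node
  // skips g_r and enters g_v directly. g_c is declared but not referenced in this unit.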
class InputState extends Bundle {
val g = UInt(3.W)
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
val flow = new FlowRoutingBundle
val fifo_deps = UInt(nVirtualChannels.W)
}
val input_buffer = Module(new InputBuffer(cParam))
for (i <- 0 until cParam.srcSpeedup) {
input_buffer.io.enq(i) := io.in.flit(i)
}
input_buffer.io.deq.foreach(_.ready := false.B)
val route_arbiter = Module(new Arbiter(
new RouteComputerReq, nVirtualChannels
))
io.router_req <> route_arbiter.io.out
val states = Reg(Vec(nVirtualChannels, new InputState))
val anyFifo = cParam.possibleFlows.map(_.fifo).reduce(_||_)
val allFifo = cParam.possibleFlows.map(_.fifo).reduce(_&&_)
if (anyFifo) {
val idle_mask = VecInit(states.map(_.g === g_i)).asUInt
for (s <- states)
for (i <- 0 until nVirtualChannels)
s.fifo_deps := s.fifo_deps & ~idle_mask
}
for (i <- 0 until cParam.srcSpeedup) {
when (io.in.flit(i).fire && io.in.flit(i).bits.head) {
val id = io.in.flit(i).bits.virt_channel_id
assert(id < nVirtualChannels.U)
assert(states(id).g === g_i)
val at_dest = io.in.flit(i).bits.flow.egress_node === nodeId.U
states(id).g := Mux(at_dest, g_v, g_r)
states(id).vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (o.U === io.in.flit(i).bits.flow.egress_node_id) {
states(id).vc_sel(o+nOutputs)(0) := true.B
}
}
states(id).flow := io.in.flit(i).bits.flow
if (anyFifo) {
val fifo = cParam.possibleFlows.filter(_.fifo).map(_.isFlow(io.in.flit(i).bits.flow)).toSeq.orR
states(id).fifo_deps := VecInit(states.zipWithIndex.map { case (s, j) =>
s.g =/= g_i && s.flow.asUInt === io.in.flit(i).bits.flow.asUInt && j.U =/= id
}).asUInt
}
}
}
(route_arbiter.io.in zip states).zipWithIndex.map { case ((i,s),idx) =>
if (virtualChannelParams(idx).traversable) {
i.valid := s.g === g_r
i.bits.flow := s.flow
i.bits.src_virt_id := idx.U
when (i.fire) { s.g := g_v }
} else {
i.valid := false.B
i.bits := DontCare
}
}
when (io.router_req.fire) {
val id = io.router_req.bits.src_virt_id
assert(states(id).g === g_r)
states(id).g := g_v
for (i <- 0 until nVirtualChannels) {
when (i.U === id) {
states(i).vc_sel := io.router_resp.vc_sel
}
}
}
val mask = RegInit(0.U(nVirtualChannels.W))
val vcalloc_reqs = Wire(Vec(nVirtualChannels, new VCAllocReq(cParam, outParams, egressParams)))
val vcalloc_vals = Wire(Vec(nVirtualChannels, Bool()))
val vcalloc_filter = PriorityEncoderOH(Cat(vcalloc_vals.asUInt, vcalloc_vals.asUInt & ~mask))
val vcalloc_sel = vcalloc_filter(nVirtualChannels-1,0) | (vcalloc_filter >> nVirtualChannels)
  // Prioritize incoming packets
when (io.router_req.fire) {
mask := (1.U << io.router_req.bits.src_virt_id) - 1.U
} .elsewhen (vcalloc_vals.orR) {
mask := Mux1H(vcalloc_sel, (0 until nVirtualChannels).map { w => ~(0.U((w+1).W)) })
}
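  // Arbitration note (comment added for clarity): Cat(vals, vals & ~mask) with PriorityEncoderOH
  // implements a rotating priority -- the low half holds only requests with an index above the
  // previous grant (mask covers indices <= that grant), so the encoder prefers the next VC in
  // ascending order and falls back to the unmasked copy to wrap around. The router_req branch
  // above instead seeds the mask so the VC that just finished route computation is served first.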
io.vcalloc_req.valid := vcalloc_vals.orR
io.vcalloc_req.bits := Mux1H(vcalloc_sel, vcalloc_reqs)
states.zipWithIndex.map { case (s,idx) =>
if (virtualChannelParams(idx).traversable) {
vcalloc_vals(idx) := s.g === g_v && s.fifo_deps === 0.U
vcalloc_reqs(idx).in_vc := idx.U
vcalloc_reqs(idx).vc_sel := s.vc_sel
vcalloc_reqs(idx).flow := s.flow
when (vcalloc_vals(idx) && vcalloc_sel(idx) && io.vcalloc_req.ready) { s.g := g_a }
if (combineRCVA) {
when (route_arbiter.io.in(idx).fire) {
vcalloc_vals(idx) := true.B
vcalloc_reqs(idx).vc_sel := io.router_resp.vc_sel
}
}
} else {
vcalloc_vals(idx) := false.B
vcalloc_reqs(idx) := DontCare
}
}
io.debug.va_stall := PopCount(vcalloc_vals) - io.vcalloc_req.ready
when (io.vcalloc_req.fire) {
for (i <- 0 until nVirtualChannels) {
when (vcalloc_sel(i)) {
states(i).vc_sel := io.vcalloc_resp.vc_sel
states(i).g := g_a
if (!combineRCVA) {
assert(states(i).g === g_v)
}
}
}
}
val salloc_arb = Module(new SwitchArbiter(
nVirtualChannels,
cParam.destSpeedup,
outParams, egressParams
))
(states zip salloc_arb.io.in).zipWithIndex.map { case ((s,r),i) =>
if (virtualChannelParams(i).traversable) {
val credit_available = (s.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
r.valid := s.g === g_a && credit_available && input_buffer.io.deq(i).valid
r.bits.vc_sel := s.vc_sel
val deq_tail = input_buffer.io.deq(i).bits.tail
r.bits.tail := deq_tail
when (r.fire && deq_tail) {
s.g := g_i
}
input_buffer.io.deq(i).ready := r.ready
} else {
r.valid := false.B
r.bits := DontCare
}
}
io.debug.sa_stall := PopCount(salloc_arb.io.in.map(r => r.valid && !r.ready))
io.salloc_req <> salloc_arb.io.out
when (io.block) {
salloc_arb.io.out.foreach(_.ready := false.B)
io.salloc_req.foreach(_.valid := false.B)
}
class OutBundle extends Bundle {
val valid = Bool()
val vid = UInt(virtualChannelBits.W)
val out_vid = UInt(log2Up(allOutParams.map(_.nVirtualChannels).max).W)
val flit = new Flit(cParam.payloadBits)
}
val salloc_outs = if (combineSAST) {
Wire(Vec(cParam.destSpeedup, new OutBundle))
} else {
Reg(Vec(cParam.destSpeedup, new OutBundle))
}
io.in.credit_return := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire, salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
io.in.vc_free := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire && Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)),
salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
for (i <- 0 until cParam.destSpeedup) {
val salloc_out = salloc_outs(i)
salloc_out.valid := salloc_arb.io.out(i).fire
salloc_out.vid := OHToUInt(salloc_arb.io.chosen_oh(i))
val vc_sel = Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.vc_sel))
val channel_oh = vc_sel.map(_.reduce(_||_)).toSeq
val virt_channel = Mux1H(channel_oh, vc_sel.map(v => OHToUInt(v)).toSeq)
when (salloc_arb.io.out(i).fire) {
salloc_out.out_vid := virt_channel
salloc_out.flit.payload := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.payload))
salloc_out.flit.head := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.head))
salloc_out.flit.tail := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail))
salloc_out.flit.flow := Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.flow))
} .otherwise {
salloc_out.out_vid := DontCare
salloc_out.flit := DontCare
}
salloc_out.flit.virt_channel_id := DontCare // this gets set in the switch
io.out(i).valid := salloc_out.valid
io.out(i).bits.flit := salloc_out.flit
io.out(i).bits.out_virt_channel := salloc_out.out_vid
}
def filterVCSel(sel: MixedVec[Vec[Bool]], srcV: Int) = {
if (virtualChannelParams(srcV).traversable) {
outParams.zipWithIndex.map { case (oP, oI) =>
(0 until oP.nVirtualChannels).map { oV =>
var allow = false
virtualChannelParams(srcV).possibleFlows.foreach { pI =>
allow = allow || routingRelation(
cParam.channelRoutingInfos(srcV),
oP.channelRoutingInfos(oV),
pI
)
}
if (!allow)
sel(oI)(oV) := false.B
}
}
}
}
(0 until nVirtualChannels).map { i =>
if (!virtualChannelParams(i).traversable) states(i) := DontCare
filterVCSel(states(i).vc_sel, i)
}
when (reset.asBool) {
states.foreach(_.g := g_i)
}
}
| module InputUnit_11( // @[InputUnit.scala:158:7]
input clock, // @[InputUnit.scala:158:7]
input reset, // @[InputUnit.scala:158:7]
input io_vcalloc_req_ready, // @[InputUnit.scala:170:14]
output io_vcalloc_req_valid, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_2_0, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_1_0, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_2_0, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_1_0, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_0, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_0, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_0, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_1, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_2, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_3, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_4, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_5, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_6, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_7, // @[InputUnit.scala:170:14]
input io_salloc_req_0_ready, // @[InputUnit.scala:170:14]
output io_salloc_req_0_valid, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_0, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_0, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_tail, // @[InputUnit.scala:170:14]
output io_out_0_valid, // @[InputUnit.scala:170:14]
output io_out_0_bits_flit_head, // @[InputUnit.scala:170:14]
output io_out_0_bits_flit_tail, // @[InputUnit.scala:170:14]
output [72:0] io_out_0_bits_flit_payload, // @[InputUnit.scala:170:14]
output [2:0] io_out_0_bits_flit_flow_vnet_id, // @[InputUnit.scala:170:14]
output [4:0] io_out_0_bits_flit_flow_ingress_node, // @[InputUnit.scala:170:14]
output [1:0] io_out_0_bits_flit_flow_ingress_node_id, // @[InputUnit.scala:170:14]
output [4:0] io_out_0_bits_flit_flow_egress_node, // @[InputUnit.scala:170:14]
output [1:0] io_out_0_bits_flit_flow_egress_node_id, // @[InputUnit.scala:170:14]
output [2:0] io_debug_va_stall, // @[InputUnit.scala:170:14]
output [2:0] io_debug_sa_stall, // @[InputUnit.scala:170:14]
input io_in_flit_0_valid, // @[InputUnit.scala:170:14]
input io_in_flit_0_bits_head, // @[InputUnit.scala:170:14]
input io_in_flit_0_bits_tail, // @[InputUnit.scala:170:14]
input [72:0] io_in_flit_0_bits_payload, // @[InputUnit.scala:170:14]
input [2:0] io_in_flit_0_bits_flow_vnet_id, // @[InputUnit.scala:170:14]
input [4:0] io_in_flit_0_bits_flow_ingress_node, // @[InputUnit.scala:170:14]
input [1:0] io_in_flit_0_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14]
input [4:0] io_in_flit_0_bits_flow_egress_node, // @[InputUnit.scala:170:14]
input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[InputUnit.scala:170:14]
input [2:0] io_in_flit_0_bits_virt_channel_id, // @[InputUnit.scala:170:14]
output [7:0] io_in_credit_return, // @[InputUnit.scala:170:14]
output [7:0] io_in_vc_free // @[InputUnit.scala:170:14]
);
wire vcalloc_vals_7; // @[InputUnit.scala:266:32]
wire vcalloc_vals_6; // @[InputUnit.scala:266:32]
wire vcalloc_vals_5; // @[InputUnit.scala:266:32]
wire vcalloc_vals_4; // @[InputUnit.scala:266:32]
wire vcalloc_vals_3; // @[InputUnit.scala:266:32]
wire vcalloc_vals_2; // @[InputUnit.scala:266:32]
wire vcalloc_vals_1; // @[InputUnit.scala:266:32]
wire _salloc_arb_io_in_1_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_2_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_3_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_4_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_5_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_6_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_7_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_out_0_valid; // @[InputUnit.scala:296:26]
wire [7:0] _salloc_arb_io_chosen_oh_0; // @[InputUnit.scala:296:26]
wire _route_arbiter_io_in_1_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_2_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_3_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_4_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_5_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_6_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_7_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_out_valid; // @[InputUnit.scala:187:29]
wire [2:0] _route_arbiter_io_out_bits_src_virt_id; // @[InputUnit.scala:187:29]
wire _input_buffer_io_deq_0_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_0_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_0_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_1_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_2_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_3_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_3_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_3_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_3_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_4_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_4_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_4_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_4_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_5_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_5_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_5_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_5_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_6_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_6_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_6_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_6_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_7_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_7_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_7_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_7_bits_payload; // @[InputUnit.scala:181:28]
reg [2:0] states_1_g; // @[InputUnit.scala:192:19]
reg states_1_vc_sel_2_0; // @[InputUnit.scala:192:19]
reg states_1_vc_sel_1_0; // @[InputUnit.scala:192:19]
reg [2:0] states_1_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [4:0] states_1_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_1_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [4:0] states_1_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_1_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_2_g; // @[InputUnit.scala:192:19]
reg states_2_vc_sel_2_0; // @[InputUnit.scala:192:19]
reg states_2_vc_sel_1_0; // @[InputUnit.scala:192:19]
reg [2:0] states_2_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [4:0] states_2_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_2_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [4:0] states_2_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_2_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_3_g; // @[InputUnit.scala:192:19]
reg states_3_vc_sel_2_0; // @[InputUnit.scala:192:19]
reg states_3_vc_sel_1_0; // @[InputUnit.scala:192:19]
reg [2:0] states_3_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [4:0] states_3_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_3_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [4:0] states_3_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_3_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_4_g; // @[InputUnit.scala:192:19]
reg states_4_vc_sel_2_0; // @[InputUnit.scala:192:19]
reg states_4_vc_sel_1_0; // @[InputUnit.scala:192:19]
reg [2:0] states_4_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [4:0] states_4_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_4_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [4:0] states_4_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_4_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_5_g; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_2_0; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_1_0; // @[InputUnit.scala:192:19]
reg [2:0] states_5_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [4:0] states_5_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_5_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [4:0] states_5_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_5_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_6_g; // @[InputUnit.scala:192:19]
reg states_6_vc_sel_2_0; // @[InputUnit.scala:192:19]
reg states_6_vc_sel_1_0; // @[InputUnit.scala:192:19]
reg [2:0] states_6_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [4:0] states_6_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_6_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [4:0] states_6_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_6_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_7_g; // @[InputUnit.scala:192:19]
reg states_7_vc_sel_2_0; // @[InputUnit.scala:192:19]
reg states_7_vc_sel_1_0; // @[InputUnit.scala:192:19]
reg [2:0] states_7_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [4:0] states_7_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_7_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [4:0] states_7_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_7_flow_egress_node_id; // @[InputUnit.scala:192:19]
wire _GEN = io_in_flit_0_valid & io_in_flit_0_bits_head; // @[InputUnit.scala:205:30]
wire route_arbiter_io_in_1_valid = states_1_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_2_valid = states_2_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_3_valid = states_3_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_4_valid = states_4_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_5_valid = states_5_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_6_valid = states_6_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_7_valid = states_7_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
reg [7:0] mask; // @[InputUnit.scala:250:21]
wire [7:0] _vcalloc_filter_T_3 = {vcalloc_vals_7, vcalloc_vals_6, vcalloc_vals_5, vcalloc_vals_4, vcalloc_vals_3, vcalloc_vals_2, vcalloc_vals_1, 1'h0} & ~mask; // @[InputUnit.scala:250:21, :253:{80,87,89}, :266:32]
wire [15:0] vcalloc_filter = _vcalloc_filter_T_3[0] ? 16'h1 : _vcalloc_filter_T_3[1] ? 16'h2 : _vcalloc_filter_T_3[2] ? 16'h4 : _vcalloc_filter_T_3[3] ? 16'h8 : _vcalloc_filter_T_3[4] ? 16'h10 : _vcalloc_filter_T_3[5] ? 16'h20 : _vcalloc_filter_T_3[6] ? 16'h40 : _vcalloc_filter_T_3[7] ? 16'h80 : vcalloc_vals_1 ? 16'h200 : vcalloc_vals_2 ? 16'h400 : vcalloc_vals_3 ? 16'h800 : vcalloc_vals_4 ? 16'h1000 : vcalloc_vals_5 ? 16'h2000 : vcalloc_vals_6 ? 16'h4000 : {vcalloc_vals_7, 15'h0}; // @[OneHot.scala:85:71]
wire [7:0] vcalloc_sel = vcalloc_filter[7:0] | vcalloc_filter[15:8]; // @[Mux.scala:50:70]
wire io_vcalloc_req_valid_0 = vcalloc_vals_1 | vcalloc_vals_2 | vcalloc_vals_3 | vcalloc_vals_4 | vcalloc_vals_5 | vcalloc_vals_6 | vcalloc_vals_7; // @[package.scala:81:59]
assign vcalloc_vals_1 = states_1_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_2 = states_2_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_3 = states_3_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_4 = states_4_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_5 = states_5_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_6 = states_6_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_7 = states_7_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
wire _GEN_0 = io_vcalloc_req_ready & io_vcalloc_req_valid_0; // @[Decoupled.scala:51:35]
wire _GEN_1 = _GEN_0 & vcalloc_sel[1]; // @[Mux.scala:32:36]
wire _GEN_2 = _GEN_0 & vcalloc_sel[2]; // @[Mux.scala:32:36]
wire _GEN_3 = _GEN_0 & vcalloc_sel[3]; // @[Mux.scala:32:36]
wire _GEN_4 = _GEN_0 & vcalloc_sel[4]; // @[Mux.scala:32:36]
wire _GEN_5 = _GEN_0 & vcalloc_sel[5]; // @[Mux.scala:32:36]
wire _GEN_6 = _GEN_0 & vcalloc_sel[6]; // @[Mux.scala:32:36]
wire _GEN_7 = _GEN_0 & vcalloc_sel[7]; // @[Mux.scala:32:36] |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
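  // Hedged illustration (added, not in the original file): what the string helpers above produce.
  // `exampleUnderscore` and `exampleKebab` are hypothetical names used only for this sketch.
  private val exampleUnderscore = "CamelCase Name".underscore // "camel_case_name"
  private val exampleKebab      = "Camel_Case Name".kebab     // "camel-case-name"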
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
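  // Hedged usage sketch (added for illustration, not part of the original file): a tiny module
  // exercising a few of the UIntToAugmentedUInt helpers above. The module name `UIntOpsExample`
  // and its port widths are arbitrary choices for the example.
  class UIntOpsExample extends Module {
    val io = IO(new Bundle {
      val in   = Input(UInt(4.W))
      val sext = Output(UInt(8.W))   // io.in sign-extended by replicating bit 3
      val rot  = Output(UInt(4.W))   // io.in rotated right by one bit position
      val sum  = Output(UInt(4.W))   // (io.in + 3) modulo 10, assuming io.in < 10
    })
    io.sext := io.in.sextTo(8)
    io.rot  := io.in.rotateRight(1)
    io.sum  := io.in.addWrap(3.U, 10)
  }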
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
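  // Hedged usage sketch (added for illustration; `grantToThermometer` is a hypothetical helper):
  // rightOR smears a one-hot grant downward into a thermometer mask, e.g. an 8-bit 0b00010000
  // elaborates to 0b00011111, while leftOR on the same value would give 0b11110000.
  private def grantToThermometer(grantOH: UInt): UInt = rightOR(grantOH)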
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
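  // Hedged example (added for illustration; `exampleGroups` is a hypothetical name): unlike
  // Seq.groupBy, which returns an unordered Map, groupByIntoSeq keeps keys in first-encounter
  // order, so generated hardware does not depend on hash ordering.
  private def exampleGroups: Seq[(Int, Seq[Int])] =
    groupByIntoSeq(Seq(3, 1, 4, 1, 5))(_ % 2) // Seq(1 -> Seq(3, 1, 1, 5), 0 -> Seq(4))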
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
| module OptimizationBarrier_TLBEntryData_16( // @[package.scala:267:30]
input clock, // @[package.scala:267:30]
input reset, // @[package.scala:267:30]
input [19:0] io_x_ppn, // @[package.scala:268:18]
input io_x_u, // @[package.scala:268:18]
input io_x_g, // @[package.scala:268:18]
input io_x_ae_ptw, // @[package.scala:268:18]
input io_x_ae_final, // @[package.scala:268:18]
input io_x_ae_stage2, // @[package.scala:268:18]
input io_x_pf, // @[package.scala:268:18]
input io_x_gf, // @[package.scala:268:18]
input io_x_sw, // @[package.scala:268:18]
input io_x_sx, // @[package.scala:268:18]
input io_x_sr, // @[package.scala:268:18]
input io_x_hw, // @[package.scala:268:18]
input io_x_hx, // @[package.scala:268:18]
input io_x_hr, // @[package.scala:268:18]
input io_x_pw, // @[package.scala:268:18]
input io_x_px, // @[package.scala:268:18]
input io_x_pr, // @[package.scala:268:18]
input io_x_ppp, // @[package.scala:268:18]
input io_x_pal, // @[package.scala:268:18]
input io_x_paa, // @[package.scala:268:18]
input io_x_eff, // @[package.scala:268:18]
input io_x_c, // @[package.scala:268:18]
input io_x_fragmented_superpage, // @[package.scala:268:18]
output [19:0] io_y_ppn, // @[package.scala:268:18]
output io_y_u, // @[package.scala:268:18]
output io_y_ae_ptw, // @[package.scala:268:18]
output io_y_ae_final, // @[package.scala:268:18]
output io_y_ae_stage2, // @[package.scala:268:18]
output io_y_pf, // @[package.scala:268:18]
output io_y_gf, // @[package.scala:268:18]
output io_y_sw, // @[package.scala:268:18]
output io_y_sx, // @[package.scala:268:18]
output io_y_sr, // @[package.scala:268:18]
output io_y_hw, // @[package.scala:268:18]
output io_y_hx, // @[package.scala:268:18]
output io_y_hr, // @[package.scala:268:18]
output io_y_pw, // @[package.scala:268:18]
output io_y_px, // @[package.scala:268:18]
output io_y_pr, // @[package.scala:268:18]
output io_y_ppp, // @[package.scala:268:18]
output io_y_pal, // @[package.scala:268:18]
output io_y_paa, // @[package.scala:268:18]
output io_y_eff, // @[package.scala:268:18]
output io_y_c // @[package.scala:268:18]
);
wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30]
wire io_x_u_0 = io_x_u; // @[package.scala:267:30]
wire io_x_g_0 = io_x_g; // @[package.scala:267:30]
wire io_x_ae_ptw_0 = io_x_ae_ptw; // @[package.scala:267:30]
wire io_x_ae_final_0 = io_x_ae_final; // @[package.scala:267:30]
wire io_x_ae_stage2_0 = io_x_ae_stage2; // @[package.scala:267:30]
wire io_x_pf_0 = io_x_pf; // @[package.scala:267:30]
wire io_x_gf_0 = io_x_gf; // @[package.scala:267:30]
wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30]
wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30]
wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30]
wire io_x_hw_0 = io_x_hw; // @[package.scala:267:30]
wire io_x_hx_0 = io_x_hx; // @[package.scala:267:30]
wire io_x_hr_0 = io_x_hr; // @[package.scala:267:30]
wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30]
wire io_x_px_0 = io_x_px; // @[package.scala:267:30]
wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30]
wire io_x_ppp_0 = io_x_ppp; // @[package.scala:267:30]
wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30]
wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30]
wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30]
wire io_x_c_0 = io_x_c; // @[package.scala:267:30]
wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30]
wire [19:0] io_y_ppn_0 = io_x_ppn_0; // @[package.scala:267:30]
wire io_y_u_0 = io_x_u_0; // @[package.scala:267:30]
wire io_y_g = io_x_g_0; // @[package.scala:267:30]
wire io_y_ae_ptw_0 = io_x_ae_ptw_0; // @[package.scala:267:30]
wire io_y_ae_final_0 = io_x_ae_final_0; // @[package.scala:267:30]
wire io_y_ae_stage2_0 = io_x_ae_stage2_0; // @[package.scala:267:30]
wire io_y_pf_0 = io_x_pf_0; // @[package.scala:267:30]
wire io_y_gf_0 = io_x_gf_0; // @[package.scala:267:30]
wire io_y_sw_0 = io_x_sw_0; // @[package.scala:267:30]
wire io_y_sx_0 = io_x_sx_0; // @[package.scala:267:30]
wire io_y_sr_0 = io_x_sr_0; // @[package.scala:267:30]
wire io_y_hw_0 = io_x_hw_0; // @[package.scala:267:30]
wire io_y_hx_0 = io_x_hx_0; // @[package.scala:267:30]
wire io_y_hr_0 = io_x_hr_0; // @[package.scala:267:30]
wire io_y_pw_0 = io_x_pw_0; // @[package.scala:267:30]
wire io_y_px_0 = io_x_px_0; // @[package.scala:267:30]
wire io_y_pr_0 = io_x_pr_0; // @[package.scala:267:30]
wire io_y_ppp_0 = io_x_ppp_0; // @[package.scala:267:30]
wire io_y_pal_0 = io_x_pal_0; // @[package.scala:267:30]
wire io_y_paa_0 = io_x_paa_0; // @[package.scala:267:30]
wire io_y_eff_0 = io_x_eff_0; // @[package.scala:267:30]
wire io_y_c_0 = io_x_c_0; // @[package.scala:267:30]
wire io_y_fragmented_superpage = io_x_fragmented_superpage_0; // @[package.scala:267:30]
assign io_y_ppn = io_y_ppn_0; // @[package.scala:267:30]
assign io_y_u = io_y_u_0; // @[package.scala:267:30]
assign io_y_ae_ptw = io_y_ae_ptw_0; // @[package.scala:267:30]
assign io_y_ae_final = io_y_ae_final_0; // @[package.scala:267:30]
assign io_y_ae_stage2 = io_y_ae_stage2_0; // @[package.scala:267:30]
assign io_y_pf = io_y_pf_0; // @[package.scala:267:30]
assign io_y_gf = io_y_gf_0; // @[package.scala:267:30]
assign io_y_sw = io_y_sw_0; // @[package.scala:267:30]
assign io_y_sx = io_y_sx_0; // @[package.scala:267:30]
assign io_y_sr = io_y_sr_0; // @[package.scala:267:30]
assign io_y_hw = io_y_hw_0; // @[package.scala:267:30]
assign io_y_hx = io_y_hx_0; // @[package.scala:267:30]
assign io_y_hr = io_y_hr_0; // @[package.scala:267:30]
assign io_y_pw = io_y_pw_0; // @[package.scala:267:30]
assign io_y_px = io_y_px_0; // @[package.scala:267:30]
assign io_y_pr = io_y_pr_0; // @[package.scala:267:30]
assign io_y_ppp = io_y_ppp_0; // @[package.scala:267:30]
assign io_y_pal = io_y_pal_0; // @[package.scala:267:30]
assign io_y_paa = io_y_paa_0; // @[package.scala:267:30]
assign io_y_eff = io_y_eff_0; // @[package.scala:267:30]
assign io_y_c = io_y_c_0; // @[package.scala:267:30]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File IngressUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
class IngressUnit(
ingressNodeId: Int,
cParam: IngressChannelParams,
outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean,
combineSAST: Boolean,
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
class IngressUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(Decoupled(new IngressFlit(cParam.payloadBits)))
}
val io = IO(new IngressUnitIO)
val route_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2))
val route_q = Module(new Queue(new RouteComputerResp(outParams, egressParams), 2,
flow=combineRCVA))
assert(!(io.in.valid && !cParam.possibleFlows.toSeq.map(_.egressId.U === io.in.bits.egress_id).orR))
route_buffer.io.enq.bits.head := io.in.bits.head
route_buffer.io.enq.bits.tail := io.in.bits.tail
val flows = cParam.possibleFlows.toSeq
if (flows.size == 0) {
route_buffer.io.enq.bits.flow := DontCare
} else {
route_buffer.io.enq.bits.flow.ingress_node := cParam.destId.U
route_buffer.io.enq.bits.flow.ingress_node_id := ingressNodeId.U
route_buffer.io.enq.bits.flow.vnet_id := cParam.vNetId.U
route_buffer.io.enq.bits.flow.egress_node := Mux1H(
flows.map(_.egressId.U === io.in.bits.egress_id),
flows.map(_.egressNode.U)
)
route_buffer.io.enq.bits.flow.egress_node_id := Mux1H(
flows.map(_.egressId.U === io.in.bits.egress_id),
flows.map(_.egressNodeId.U)
)
}
route_buffer.io.enq.bits.payload := io.in.bits.payload
route_buffer.io.enq.bits.virt_channel_id := DontCare
io.router_req.bits.src_virt_id := 0.U
io.router_req.bits.flow := route_buffer.io.enq.bits.flow
val at_dest = route_buffer.io.enq.bits.flow.egress_node === nodeId.U
route_buffer.io.enq.valid := io.in.valid && (
io.router_req.ready || !io.in.bits.head || at_dest)
io.router_req.valid := io.in.valid && route_buffer.io.enq.ready && io.in.bits.head && !at_dest
io.in.ready := route_buffer.io.enq.ready && (
io.router_req.ready || !io.in.bits.head || at_dest)
route_q.io.enq.valid := io.router_req.fire
route_q.io.enq.bits := io.router_resp
when (io.in.fire && io.in.bits.head && at_dest) {
route_q.io.enq.valid := true.B
route_q.io.enq.bits.vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (egressParams(o).egressId.U === io.in.bits.egress_id) {
route_q.io.enq.bits.vc_sel(o+nOutputs)(0) := true.B
}
}
}
assert(!(route_q.io.enq.valid && !route_q.io.enq.ready))
val vcalloc_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2))
val vcalloc_q = Module(new Queue(new VCAllocResp(outParams, egressParams),
1, pipe=true))
vcalloc_buffer.io.enq.bits := route_buffer.io.deq.bits
io.vcalloc_req.bits.vc_sel := route_q.io.deq.bits.vc_sel
io.vcalloc_req.bits.flow := route_buffer.io.deq.bits.flow
io.vcalloc_req.bits.in_vc := 0.U
val head = route_buffer.io.deq.bits.head
val tail = route_buffer.io.deq.bits.tail
vcalloc_buffer.io.enq.valid := (route_buffer.io.deq.valid &&
(route_q.io.deq.valid || !head) &&
(io.vcalloc_req.ready || !head)
)
io.vcalloc_req.valid := (route_buffer.io.deq.valid && route_q.io.deq.valid &&
head && vcalloc_buffer.io.enq.ready && vcalloc_q.io.enq.ready)
route_buffer.io.deq.ready := (vcalloc_buffer.io.enq.ready &&
(route_q.io.deq.valid || !head) &&
(io.vcalloc_req.ready || !head) &&
(vcalloc_q.io.enq.ready || !head))
route_q.io.deq.ready := (route_buffer.io.deq.fire && tail)
vcalloc_q.io.enq.valid := io.vcalloc_req.fire
vcalloc_q.io.enq.bits := io.vcalloc_resp
assert(!(vcalloc_q.io.enq.valid && !vcalloc_q.io.enq.ready))
io.salloc_req(0).bits.vc_sel := vcalloc_q.io.deq.bits.vc_sel
io.salloc_req(0).bits.tail := vcalloc_buffer.io.deq.bits.tail
val c = (vcalloc_q.io.deq.bits.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
val vcalloc_tail = vcalloc_buffer.io.deq.bits.tail
io.salloc_req(0).valid := vcalloc_buffer.io.deq.valid && vcalloc_q.io.deq.valid && c && !io.block
vcalloc_buffer.io.deq.ready := io.salloc_req(0).ready && vcalloc_q.io.deq.valid && c && !io.block
vcalloc_q.io.deq.ready := vcalloc_tail && vcalloc_buffer.io.deq.fire
val out_bundle = if (combineSAST) {
Wire(Valid(new SwitchBundle(outParams, egressParams)))
} else {
Reg(Valid(new SwitchBundle(outParams, egressParams)))
}
io.out(0) := out_bundle
out_bundle.valid := vcalloc_buffer.io.deq.fire
out_bundle.bits.flit := vcalloc_buffer.io.deq.bits
out_bundle.bits.flit.virt_channel_id := 0.U
val out_channel_oh = vcalloc_q.io.deq.bits.vc_sel.map(_.reduce(_||_)).toSeq
out_bundle.bits.out_virt_channel := Mux1H(out_channel_oh,
vcalloc_q.io.deq.bits.vc_sel.map(v => OHToUInt(v)).toSeq)
io.debug.va_stall := io.vcalloc_req.valid && !io.vcalloc_req.ready
io.debug.sa_stall := io.salloc_req(0).valid && !io.salloc_req(0).ready
// TODO: We should not generate input/ingress/output/egress units for untraversable channels
if (!cParam.traversable) {
io.in.ready := false.B
io.router_req.valid := false.B
io.router_req.bits := DontCare
io.vcalloc_req.valid := false.B
io.vcalloc_req.bits := DontCare
io.salloc_req.foreach(_.valid := false.B)
io.salloc_req.foreach(_.bits := DontCare)
io.out.foreach(_.valid := false.B)
io.out.foreach(_.bits := DontCare)
}
}
| module IngressUnit_32( // @[IngressUnit.scala:11:7]
input clock, // @[IngressUnit.scala:11:7]
input reset, // @[IngressUnit.scala:11:7]
input io_vcalloc_req_ready, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_valid, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_2_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_1_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_0, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_1, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_2, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_3, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_4, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_5, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_6, // @[IngressUnit.scala:24:14]
output io_vcalloc_req_bits_vc_sel_0_7, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_2_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_1_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_0, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_1, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_2, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_3, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_4, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_5, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_6, // @[IngressUnit.scala:24:14]
input io_vcalloc_resp_vc_sel_0_7, // @[IngressUnit.scala:24:14]
input io_out_credit_available_2_0, // @[IngressUnit.scala:24:14]
input io_out_credit_available_1_0, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_0, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_1, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_2, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_3, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_4, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_5, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_6, // @[IngressUnit.scala:24:14]
input io_out_credit_available_0_7, // @[IngressUnit.scala:24:14]
input io_salloc_req_0_ready, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_valid, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_2_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_1_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_0, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_1, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_2, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_3, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_4, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_5, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_6, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_vc_sel_0_7, // @[IngressUnit.scala:24:14]
output io_salloc_req_0_bits_tail, // @[IngressUnit.scala:24:14]
output io_out_0_valid, // @[IngressUnit.scala:24:14]
output io_out_0_bits_flit_head, // @[IngressUnit.scala:24:14]
output io_out_0_bits_flit_tail, // @[IngressUnit.scala:24:14]
output [72:0] io_out_0_bits_flit_payload, // @[IngressUnit.scala:24:14]
output [2:0] io_out_0_bits_flit_flow_vnet_id, // @[IngressUnit.scala:24:14]
output [4:0] io_out_0_bits_flit_flow_ingress_node, // @[IngressUnit.scala:24:14]
output [1:0] io_out_0_bits_flit_flow_ingress_node_id, // @[IngressUnit.scala:24:14]
output [4:0] io_out_0_bits_flit_flow_egress_node, // @[IngressUnit.scala:24:14]
output [1:0] io_out_0_bits_flit_flow_egress_node_id, // @[IngressUnit.scala:24:14]
output [2:0] io_out_0_bits_out_virt_channel, // @[IngressUnit.scala:24:14]
output io_in_ready, // @[IngressUnit.scala:24:14]
input io_in_valid, // @[IngressUnit.scala:24:14]
input io_in_bits_head, // @[IngressUnit.scala:24:14]
input io_in_bits_tail, // @[IngressUnit.scala:24:14]
input [72:0] io_in_bits_payload, // @[IngressUnit.scala:24:14]
input [4:0] io_in_bits_egress_id // @[IngressUnit.scala:24:14]
);
wire _GEN; // @[Decoupled.scala:51:35]
wire _vcalloc_q_io_enq_ready; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_valid; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_2_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_1_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_0; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_1; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_2; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_3; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_4; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_5; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_6; // @[IngressUnit.scala:76:25]
wire _vcalloc_q_io_deq_bits_vc_sel_0_7; // @[IngressUnit.scala:76:25]
wire _vcalloc_buffer_io_enq_ready; // @[IngressUnit.scala:75:30]
wire _vcalloc_buffer_io_deq_valid; // @[IngressUnit.scala:75:30]
wire _vcalloc_buffer_io_deq_bits_head; // @[IngressUnit.scala:75:30]
wire _vcalloc_buffer_io_deq_bits_tail; // @[IngressUnit.scala:75:30]
wire [72:0] _vcalloc_buffer_io_deq_bits_payload; // @[IngressUnit.scala:75:30]
wire [2:0] _vcalloc_buffer_io_deq_bits_flow_vnet_id; // @[IngressUnit.scala:75:30]
wire [4:0] _vcalloc_buffer_io_deq_bits_flow_ingress_node; // @[IngressUnit.scala:75:30]
wire [1:0] _vcalloc_buffer_io_deq_bits_flow_ingress_node_id; // @[IngressUnit.scala:75:30]
wire [4:0] _vcalloc_buffer_io_deq_bits_flow_egress_node; // @[IngressUnit.scala:75:30]
wire [1:0] _vcalloc_buffer_io_deq_bits_flow_egress_node_id; // @[IngressUnit.scala:75:30]
wire _route_q_io_enq_ready; // @[IngressUnit.scala:27:23]
wire _route_q_io_deq_valid; // @[IngressUnit.scala:27:23]
wire _route_buffer_io_enq_ready; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_valid; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_bits_head; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_deq_bits_tail; // @[IngressUnit.scala:26:28]
wire [72:0] _route_buffer_io_deq_bits_payload; // @[IngressUnit.scala:26:28]
wire [2:0] _route_buffer_io_deq_bits_flow_vnet_id; // @[IngressUnit.scala:26:28]
wire [4:0] _route_buffer_io_deq_bits_flow_ingress_node; // @[IngressUnit.scala:26:28]
wire [1:0] _route_buffer_io_deq_bits_flow_ingress_node_id; // @[IngressUnit.scala:26:28]
wire [4:0] _route_buffer_io_deq_bits_flow_egress_node; // @[IngressUnit.scala:26:28]
wire [1:0] _route_buffer_io_deq_bits_flow_egress_node_id; // @[IngressUnit.scala:26:28]
wire [2:0] _route_buffer_io_deq_bits_virt_channel_id; // @[IngressUnit.scala:26:28]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_4 = io_in_bits_egress_id == 5'h16; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_5 = io_in_bits_egress_id == 5'h19; // @[IngressUnit.scala:30:72]
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_6 = io_in_bits_egress_id == 5'h1C; // @[IngressUnit.scala:30:72]
wire [3:0] _route_buffer_io_enq_bits_flow_egress_node_T_10 = {1'h0, (_route_buffer_io_enq_bits_flow_egress_node_id_T_4 ? 3'h5 : 3'h0) | (_route_buffer_io_enq_bits_flow_egress_node_id_T_5 ? 3'h6 : 3'h0)} | (_route_buffer_io_enq_bits_flow_egress_node_id_T_6 ? 4'h9 : 4'h0) | ((&io_in_bits_egress_id) ? 4'hA : 4'h0); // @[Mux.scala:30:73]
assign _GEN = _route_buffer_io_enq_ready & io_in_valid & io_in_bits_head & _route_buffer_io_enq_bits_flow_egress_node_T_10 == 4'hD; // @[Mux.scala:30:73]
wire route_q_io_enq_valid = _GEN | io_in_valid & _route_buffer_io_enq_ready & io_in_bits_head & _route_buffer_io_enq_bits_flow_egress_node_T_10 != 4'hD; // @[Mux.scala:30:73]
wire io_vcalloc_req_valid_0 = _route_buffer_io_deq_valid & _route_q_io_deq_valid & _route_buffer_io_deq_bits_head & _vcalloc_buffer_io_enq_ready & _vcalloc_q_io_enq_ready; // @[IngressUnit.scala:26:28, :27:23, :75:30, :76:25, :91:{54,78}, :92:{10,41}]
wire route_buffer_io_deq_ready = _vcalloc_buffer_io_enq_ready & (_route_q_io_deq_valid | ~_route_buffer_io_deq_bits_head) & (io_vcalloc_req_ready | ~_route_buffer_io_deq_bits_head) & (_vcalloc_q_io_enq_ready | ~_route_buffer_io_deq_bits_head); // @[IngressUnit.scala:26:28, :27:23, :75:30, :76:25, :88:30, :93:61, :94:{27,37}, :95:{27,37}, :96:29]
wire vcalloc_q_io_enq_valid = io_vcalloc_req_ready & io_vcalloc_req_valid_0; // @[Decoupled.scala:51:35] |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
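// Hedged usage sketch (added for illustration, not part of the original file): a two-stage,
// reset-initialized delay line built with ShiftRegInit. The module name `DelayTwoExample`,
// its ports, and the register name prefix are arbitrary choices for the example.
class DelayTwoExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  // Produces two RegNext stages, each reset to false.B, named req_pipe_0 and req_pipe_1
  io.q := ShiftRegInit(io.d, n = 2, init = false.B, name = Some("req_pipe"))
}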
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
  * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
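// Hedged usage sketch (added for illustration): pipelining a byte through a 3-deep, asynchronously
// reset shift register using the companion object's apply. `AsyncPipeExample` and its ports are
// hypothetical names for this sketch.
class AsyncPipeExample extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(8.W))
    val q = Output(UInt(8.W))
  })
  io.q := AsyncResetShiftReg(io.d, depth = 3, init = 0, name = Some("async_pipe"))
}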
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
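// Hedged usage sketch (added for illustration): synchronizing an asynchronous input into the
// local clock domain through a 3-deep chain of async-reset flops. `SyncInputExample` and its
// port names are hypothetical.
class SyncInputExample extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())
    val sync_out = Output(Bool())
  })
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, sync = 3, init = 0)
}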
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
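// Hedged usage sketch (added for illustration): capturing a bus in the local clock domain only
// when an enable strobes, using ClockCrossingReg's companion apply. `CrossingCaptureExample`
// and its port names are hypothetical.
class CrossingCaptureExample extends Module {
  val io = IO(new Bundle {
    val d  = Input(UInt(16.W))
    val en = Input(Bool())
    val q  = Output(UInt(16.W))
  })
  io.q := ClockCrossingReg(io.d, en = io.en, doInit = true, name = Some("cdc_capture"))
}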
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_427( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
end
  end // always @(posedge, posedge)
  assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
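  // Illustrative examples for the Seq rotation helpers above (hypothetical literal values):
  //   Seq(1.U, 2.U, 3.U, 4.U).rotate(1)      // => Seq(2.U, 3.U, 4.U, 1.U)
  //   Seq(1.U, 2.U, 3.U, 4.U).rotateRight(1) // => Seq(4.U, 1.U, 2.U, 3.U)
  // The UInt-shift-amount variants build a mux ladder and therefore require a power-of-2 size.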
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
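  // A minimal sketch (hypothetical names) of the readAndHold pattern above: issue a
  // SyncReadMem read when ren is high and hold the last read value on idle cycles.
  //   val mem = SyncReadMem(64, UInt(32.W))
  //   io.rdata := mem.readAndHold(io.addr, io.ren)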
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
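  // Worked examples for the string helpers above (illustrative inputs):
  //   "DebugModuleKey".underscore  // => "debug_module_key"
  //   "Debug Module Key".kebab     // => "debug-module-key"
  //   "node".named(Some("core0"))  // => "node_named_core0"
  //   "node".named(None)           // => "node_with_no_name"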
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
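  // Worked examples for a few of the UInt helpers above (illustrative values):
  //   "hC".U(4.W).sextTo(8)     // => 0xFC (sign bit replicated into the new high bits)
  //   5.U(4.W).padTo(8)         // => 0x05 (zero-extended)
  //   6.U(3.W).addWrap(5.U, 8)  // => 3.U, since (6 + 5) % 8 = 3
  //   1.U(3.W).subWrap(3.U, 8)  // => 6.U, since (1 - 3) mod 8 = 6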
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
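  // Worked example for the one-hot helpers above: an "OH1" value is a mask of ones below the
  // encoded position, so UIntToOH1(2.U, 4) = b0011, OH1ToOH(b0011) = b0100, and
  // OH1ToUInt(b0011) = 2.  Similarly trailingZeros(12) = Some(2).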
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
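  // Worked example (5-bit value): for x = b00100,
  //   leftOR(x)  = b11100  (each set bit smeared toward the MSB)
  //   rightOR(x) = b00111  (each set bit smeared toward the LSB)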
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
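  // A minimal usage sketch (hypothetical signal name): wrapping a value in OptimizationBarrier
  // keeps synthesis from merging it with logically equivalent signals, e.g. to preserve a
  // physically separate copy of a value that is deliberately duplicated:
  //   val entryCopy = OptimizationBarrier(tlbEntry)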
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
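  // Illustrative example: unlike a Map-based groupBy, the result preserves first-seen key
  // order, which keeps generated hardware deterministic from run to run:
  //   groupByIntoSeq(Seq(1, 2, 3, 4, 5, 6))(_ % 2)
  //   // => Seq((1, Seq(1, 3, 5)), (0, Seq(2, 4, 6)))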
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
| module OptimizationBarrier_TLBEntryData_34( // @[package.scala:267:30]
input clock, // @[package.scala:267:30]
input reset, // @[package.scala:267:30]
input [19:0] io_x_ppn, // @[package.scala:268:18]
input io_x_u, // @[package.scala:268:18]
input io_x_g, // @[package.scala:268:18]
input io_x_ae_ptw, // @[package.scala:268:18]
input io_x_ae_final, // @[package.scala:268:18]
input io_x_ae_stage2, // @[package.scala:268:18]
input io_x_pf, // @[package.scala:268:18]
input io_x_gf, // @[package.scala:268:18]
input io_x_sw, // @[package.scala:268:18]
input io_x_sx, // @[package.scala:268:18]
input io_x_sr, // @[package.scala:268:18]
input io_x_hw, // @[package.scala:268:18]
input io_x_hx, // @[package.scala:268:18]
input io_x_hr, // @[package.scala:268:18]
input io_x_pw, // @[package.scala:268:18]
input io_x_px, // @[package.scala:268:18]
input io_x_pr, // @[package.scala:268:18]
input io_x_ppp, // @[package.scala:268:18]
input io_x_pal, // @[package.scala:268:18]
input io_x_paa, // @[package.scala:268:18]
input io_x_eff, // @[package.scala:268:18]
input io_x_c, // @[package.scala:268:18]
input io_x_fragmented_superpage, // @[package.scala:268:18]
output [19:0] io_y_ppn, // @[package.scala:268:18]
output io_y_u, // @[package.scala:268:18]
output io_y_ae_ptw, // @[package.scala:268:18]
output io_y_ae_final, // @[package.scala:268:18]
output io_y_ae_stage2, // @[package.scala:268:18]
output io_y_pf, // @[package.scala:268:18]
output io_y_gf, // @[package.scala:268:18]
output io_y_sw, // @[package.scala:268:18]
output io_y_sx, // @[package.scala:268:18]
output io_y_sr, // @[package.scala:268:18]
output io_y_hw, // @[package.scala:268:18]
output io_y_hx, // @[package.scala:268:18]
output io_y_hr // @[package.scala:268:18]
);
wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30]
wire io_x_u_0 = io_x_u; // @[package.scala:267:30]
wire io_x_g_0 = io_x_g; // @[package.scala:267:30]
wire io_x_ae_ptw_0 = io_x_ae_ptw; // @[package.scala:267:30]
wire io_x_ae_final_0 = io_x_ae_final; // @[package.scala:267:30]
wire io_x_ae_stage2_0 = io_x_ae_stage2; // @[package.scala:267:30]
wire io_x_pf_0 = io_x_pf; // @[package.scala:267:30]
wire io_x_gf_0 = io_x_gf; // @[package.scala:267:30]
wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30]
wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30]
wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30]
wire io_x_hw_0 = io_x_hw; // @[package.scala:267:30]
wire io_x_hx_0 = io_x_hx; // @[package.scala:267:30]
wire io_x_hr_0 = io_x_hr; // @[package.scala:267:30]
wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30]
wire io_x_px_0 = io_x_px; // @[package.scala:267:30]
wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30]
wire io_x_ppp_0 = io_x_ppp; // @[package.scala:267:30]
wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30]
wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30]
wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30]
wire io_x_c_0 = io_x_c; // @[package.scala:267:30]
wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30]
wire [19:0] io_y_ppn_0 = io_x_ppn_0; // @[package.scala:267:30]
wire io_y_u_0 = io_x_u_0; // @[package.scala:267:30]
wire io_y_g = io_x_g_0; // @[package.scala:267:30]
wire io_y_ae_ptw_0 = io_x_ae_ptw_0; // @[package.scala:267:30]
wire io_y_ae_final_0 = io_x_ae_final_0; // @[package.scala:267:30]
wire io_y_ae_stage2_0 = io_x_ae_stage2_0; // @[package.scala:267:30]
wire io_y_pf_0 = io_x_pf_0; // @[package.scala:267:30]
wire io_y_gf_0 = io_x_gf_0; // @[package.scala:267:30]
wire io_y_sw_0 = io_x_sw_0; // @[package.scala:267:30]
wire io_y_sx_0 = io_x_sx_0; // @[package.scala:267:30]
wire io_y_sr_0 = io_x_sr_0; // @[package.scala:267:30]
wire io_y_hw_0 = io_x_hw_0; // @[package.scala:267:30]
wire io_y_hx_0 = io_x_hx_0; // @[package.scala:267:30]
wire io_y_hr_0 = io_x_hr_0; // @[package.scala:267:30]
wire io_y_pw = io_x_pw_0; // @[package.scala:267:30]
wire io_y_px = io_x_px_0; // @[package.scala:267:30]
wire io_y_pr = io_x_pr_0; // @[package.scala:267:30]
wire io_y_ppp = io_x_ppp_0; // @[package.scala:267:30]
wire io_y_pal = io_x_pal_0; // @[package.scala:267:30]
wire io_y_paa = io_x_paa_0; // @[package.scala:267:30]
wire io_y_eff = io_x_eff_0; // @[package.scala:267:30]
wire io_y_c = io_x_c_0; // @[package.scala:267:30]
wire io_y_fragmented_superpage = io_x_fragmented_superpage_0; // @[package.scala:267:30]
assign io_y_ppn = io_y_ppn_0; // @[package.scala:267:30]
assign io_y_u = io_y_u_0; // @[package.scala:267:30]
assign io_y_ae_ptw = io_y_ae_ptw_0; // @[package.scala:267:30]
assign io_y_ae_final = io_y_ae_final_0; // @[package.scala:267:30]
assign io_y_ae_stage2 = io_y_ae_stage2_0; // @[package.scala:267:30]
assign io_y_pf = io_y_pf_0; // @[package.scala:267:30]
assign io_y_gf = io_y_gf_0; // @[package.scala:267:30]
assign io_y_sw = io_y_sw_0; // @[package.scala:267:30]
assign io_y_sx = io_y_sx_0; // @[package.scala:267:30]
assign io_y_sr = io_y_sr_0; // @[package.scala:267:30]
assign io_y_hw = io_y_hw_0; // @[package.scala:267:30]
assign io_y_hx = io_y_hx_0; // @[package.scala:267:30]
assign io_y_hr = io_y_hr_0; // @[package.scala:267:30]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File SinkC.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
class SinkCResponse(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val last = Bool()
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val source = UInt(params.inner.bundle.sourceBits.W)
val param = UInt(3.W)
val data = Bool()
}
class PutBufferCEntry(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val data = UInt(params.inner.bundle.dataBits.W)
val corrupt = Bool()
}
class SinkC(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val req = Decoupled(new FullRequest(params)) // Release
val resp = Valid(new SinkCResponse(params)) // ProbeAck
val c = Flipped(Decoupled(new TLBundleC(params.inner.bundle)))
// Find 'way' via MSHR CAM lookup
val set = UInt(params.setBits.W)
val way = Flipped(UInt(params.wayBits.W))
// ProbeAck write-back
val bs_adr = Decoupled(new BankedStoreInnerAddress(params))
val bs_dat = new BankedStoreInnerPoison(params)
// SourceD sideband
val rel_pop = Flipped(Decoupled(new PutBufferPop(params)))
val rel_beat = new PutBufferCEntry(params)
})
if (params.firstLevel) {
// Tie off unused ports
io.req.valid := false.B
io.req.bits := DontCare
io.resp.valid := false.B
io.resp.bits := DontCare
io.c.ready := true.B
io.set := 0.U
io.bs_adr.valid := false.B
io.bs_adr.bits := DontCare
io.bs_dat := DontCare
io.rel_pop.ready := true.B
io.rel_beat := DontCare
} else {
// No restrictions on the type of buffer
val c = params.micro.innerBuf.c(io.c)
val (tag, set, offset) = params.parseAddress(c.bits.address)
val (first, last, _, beat) = params.inner.count(c)
val hasData = params.inner.hasData(c.bits)
val raw_resp = c.bits.opcode === TLMessages.ProbeAck || c.bits.opcode === TLMessages.ProbeAckData
val resp = Mux(c.valid, raw_resp, RegEnable(raw_resp, c.valid))
// Handling of C is broken into two cases:
// ProbeAck
// if hasData, must be written to BankedStore
// if last beat, trigger resp
// Release
// if first beat, trigger req
// if hasData, go to putBuffer
// if hasData && first beat, must claim a list
assert (!(c.valid && c.bits.corrupt), "Data poisoning unavailable")
io.set := Mux(c.valid, set, RegEnable(set, c.valid)) // finds us the way
// Cut path from inner C to the BankedStore SRAM setup
// ... this makes it easier to layout the L2 data banks far away
val bs_adr = Wire(chiselTypeOf(io.bs_adr))
io.bs_adr <> Queue(bs_adr, 1, pipe=true)
io.bs_dat.data := RegEnable(c.bits.data, bs_adr.fire)
bs_adr.valid := resp && (!first || (c.valid && hasData))
bs_adr.bits.noop := !c.valid
bs_adr.bits.way := io.way
bs_adr.bits.set := io.set
bs_adr.bits.beat := Mux(c.valid, beat, RegEnable(beat + bs_adr.ready.asUInt, c.valid))
bs_adr.bits.mask := ~0.U(params.innerMaskBits.W)
params.ccover(bs_adr.valid && !bs_adr.ready, "SINKC_SRAM_STALL", "Data SRAM busy")
io.resp.valid := resp && c.valid && (first || last) && (!hasData || bs_adr.ready)
io.resp.bits.last := last
io.resp.bits.set := set
io.resp.bits.tag := tag
io.resp.bits.source := c.bits.source
io.resp.bits.param := c.bits.param
io.resp.bits.data := hasData
val putbuffer = Module(new ListBuffer(ListBufferParameters(new PutBufferCEntry(params), params.relLists, params.relBeats, false)))
val lists = RegInit(0.U(params.relLists.W))
val lists_set = WireInit(init = 0.U(params.relLists.W))
val lists_clr = WireInit(init = 0.U(params.relLists.W))
lists := (lists | lists_set) & ~lists_clr
val free = !lists.andR
val freeOH = ~(leftOR(~lists) << 1) & ~lists
val freeIdx = OHToUInt(freeOH)
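    // Worked example (illustrative 4-list case): if lists = b0101 then ~lists = b1010,
    // leftOR(~lists) = b1110, and ~(leftOR(~lists) << 1) & ~lists = b0010, so freeOH is the
    // one-hot of the lowest free putbuffer list and freeIdx = 1.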
val req_block = first && !io.req.ready
val buf_block = hasData && !putbuffer.io.push.ready
val set_block = hasData && first && !free
params.ccover(c.valid && !raw_resp && req_block, "SINKC_REQ_STALL", "No MSHR available to sink request")
params.ccover(c.valid && !raw_resp && buf_block, "SINKC_BUF_STALL", "No space in putbuffer for beat")
params.ccover(c.valid && !raw_resp && set_block, "SINKC_SET_STALL", "No space in putbuffer for request")
c.ready := Mux(raw_resp, !hasData || bs_adr.ready, !req_block && !buf_block && !set_block)
io.req.valid := !resp && c.valid && first && !buf_block && !set_block
putbuffer.io.push.valid := !resp && c.valid && hasData && !req_block && !set_block
when (!resp && c.valid && first && hasData && !req_block && !buf_block) { lists_set := freeOH }
val put = Mux(first, freeIdx, RegEnable(freeIdx, first))
io.req.bits.prio := VecInit(4.U(3.W).asBools)
io.req.bits.control:= false.B
io.req.bits.opcode := c.bits.opcode
io.req.bits.param := c.bits.param
io.req.bits.size := c.bits.size
io.req.bits.source := c.bits.source
io.req.bits.offset := offset
io.req.bits.set := set
io.req.bits.tag := tag
io.req.bits.put := put
putbuffer.io.push.bits.index := put
putbuffer.io.push.bits.data.data := c.bits.data
putbuffer.io.push.bits.data.corrupt := c.bits.corrupt
// Grant access to pop the data
putbuffer.io.pop.bits := io.rel_pop.bits.index
putbuffer.io.pop.valid := io.rel_pop.fire
io.rel_pop.ready := putbuffer.io.valid(io.rel_pop.bits.index(log2Ceil(params.relLists)-1,0))
io.rel_beat := putbuffer.io.data
when (io.rel_pop.fire && io.rel_pop.bits.last) {
lists_clr := UIntToOH(io.rel_pop.bits.index, params.relLists)
}
}
}
| module SinkC( // @[SinkC.scala:41:7]
input clock, // @[SinkC.scala:41:7]
input reset, // @[SinkC.scala:41:7]
input io_req_ready, // @[SinkC.scala:43:14]
input [2:0] io_way, // @[SinkC.scala:43:14]
input io_rel_pop_valid, // @[SinkC.scala:43:14]
input [5:0] io_rel_pop_bits_index, // @[SinkC.scala:43:14]
input io_rel_pop_bits_last // @[SinkC.scala:43:14]
);
wire io_req_ready_0 = io_req_ready; // @[SinkC.scala:41:7]
wire [2:0] io_way_0 = io_way; // @[SinkC.scala:41:7]
wire io_rel_pop_valid_0 = io_rel_pop_valid; // @[SinkC.scala:41:7]
wire [5:0] io_rel_pop_bits_index_0 = io_rel_pop_bits_index; // @[SinkC.scala:41:7]
wire io_rel_pop_bits_last_0 = io_rel_pop_bits_last; // @[SinkC.scala:41:7]
wire [3:0] io_bs_adr_bits_beat = 4'h0; // @[SinkC.scala:41:7]
wire [31:0] io_c_bits_address = 32'h0; // @[SinkC.scala:41:7]
wire [31:0] io_c_bits_data = 32'h0; // @[SinkC.scala:41:7]
wire [31:0] io_bs_dat_data = 32'h0; // @[SinkC.scala:41:7]
wire [31:0] io_rel_beat_data = 32'h0; // @[SinkC.scala:41:7]
wire io_c_ready = 1'h1; // @[SinkC.scala:41:7]
wire io_bs_adr_ready = 1'h1; // @[SinkC.scala:41:7]
wire io_rel_pop_ready = 1'h1; // @[SinkC.scala:41:7]
wire [9:0] io_req_bits_set = 10'h0; // @[SinkC.scala:41:7]
wire [9:0] io_resp_bits_set = 10'h0; // @[SinkC.scala:41:7]
wire [9:0] io_set = 10'h0; // @[SinkC.scala:41:7]
wire [9:0] io_bs_adr_bits_set = 10'h0; // @[SinkC.scala:41:7]
wire [12:0] io_req_bits_tag = 13'h0; // @[SinkC.scala:41:7]
wire [12:0] io_resp_bits_tag = 13'h0; // @[SinkC.scala:41:7]
wire [5:0] io_req_bits_source = 6'h0; // @[SinkC.scala:41:7]
wire [5:0] io_req_bits_offset = 6'h0; // @[SinkC.scala:41:7]
wire [5:0] io_req_bits_put = 6'h0; // @[SinkC.scala:41:7]
wire [5:0] io_resp_bits_source = 6'h0; // @[SinkC.scala:41:7]
wire [5:0] io_c_bits_source = 6'h0; // @[SinkC.scala:41:7]
wire [2:0] io_req_bits_opcode = 3'h0; // @[SinkC.scala:41:7]
wire [2:0] io_req_bits_param = 3'h0; // @[SinkC.scala:41:7]
wire [2:0] io_req_bits_size = 3'h0; // @[SinkC.scala:41:7]
wire [2:0] io_resp_bits_param = 3'h0; // @[SinkC.scala:41:7]
wire [2:0] io_c_bits_opcode = 3'h0; // @[SinkC.scala:41:7]
wire [2:0] io_c_bits_param = 3'h0; // @[SinkC.scala:41:7]
wire [2:0] io_c_bits_size = 3'h0; // @[SinkC.scala:41:7]
wire [2:0] io_bs_adr_bits_way = 3'h0; // @[SinkC.scala:41:7]
wire io_req_valid = 1'h0; // @[SinkC.scala:41:7]
wire io_req_bits_prio_0 = 1'h0; // @[SinkC.scala:41:7]
wire io_req_bits_prio_1 = 1'h0; // @[SinkC.scala:41:7]
wire io_req_bits_prio_2 = 1'h0; // @[SinkC.scala:41:7]
wire io_req_bits_control = 1'h0; // @[SinkC.scala:41:7]
wire io_resp_valid = 1'h0; // @[SinkC.scala:41:7]
wire io_resp_bits_last = 1'h0; // @[SinkC.scala:41:7]
wire io_resp_bits_data = 1'h0; // @[SinkC.scala:41:7]
wire io_c_valid = 1'h0; // @[SinkC.scala:41:7]
wire io_c_bits_corrupt = 1'h0; // @[SinkC.scala:41:7]
wire io_bs_adr_valid = 1'h0; // @[SinkC.scala:41:7]
wire io_bs_adr_bits_noop = 1'h0; // @[SinkC.scala:41:7]
wire io_bs_adr_bits_mask = 1'h0; // @[SinkC.scala:41:7]
wire io_rel_beat_corrupt = 1'h0; // @[SinkC.scala:41:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
    //The monitor doesn’t check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
    monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address=== address,"'B' channel address changed with multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //returns a mask of x consecutive ones
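// Worked example of the packing scheme above (added for clarity): with a 3-bit opcode the
// per-source slot is a_opcode_bus_size = 4 bits wide, so an in-flight Get (opcode 4) is
// stored as (4 << 1) | 1 = 9, while a slot value of 0 means "nothing outstanding".
// This is why the lookups below shift right by 1 to strip the valid bit before comparing.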
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //returns a mask of x consecutive ones
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
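// Illustrative usage sketch (added; `in`, `out`, and `credit_ok` are hypothetical signals):
// DecoupledHelper gathers all ready/valid terms of a transfer so each endpoint can compute
// fire() while excluding its own term, keeping in.ready independent of in.valid.
//   val helper = DecoupledHelper(in.valid, out.ready, credit_ok)
//   in.ready := helper.fire(in.valid) // all terms except in.valid
//   out.valid := helper.fire(out.ready) // all terms except out.ready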
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
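// Illustrative sketch (added): Str("OK") packs the ASCII bytes into a 16-bit literal
// (0x4f4b), and Str(someUInt, 16) renders a UInt as ASCII hexadecimal digits, which is
// convenient for printf-style debugging.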
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
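// Illustrative usage sketch (added; the plusarg names below are hypothetical):
//   val max_cycles = PlusArg("max_cycles", default = 0, docstring = "Stop after N cycles (0 = off)")
//   PlusArg.timeout("deadlock_cycles", docstring = "Abort when the counter exceeds this value")(counter)
// At runtime the simulator is invoked with e.g. +max_cycles=100000 to override the default.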
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
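// Illustrative sketch (added; `ptr` is a hypothetical index into a 5-entry structure):
// addWrap/subWrap keep an index inside [0, n) without a full modulo circuit.
//   val next = ptr.addWrap(1.U, 5) // 4 wraps to 0, otherwise ptr + 1
//   val prev = ptr.subWrap(1.U, 5) // 0 wraps to 4, otherwise ptr - 1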
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
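// Illustrative sketch (added): for x = b00100, leftOR(x) = b11100 (each set bit smears
// toward the MSB) and rightOR(x) = b00111 (each set bit smears toward the LSB).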
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
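// Illustrative sketch (added): unlike Seq.groupBy, key order follows first appearance in
// the input, so downstream hardware generation is deterministic across runs.
//   groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) // Seq((1, Seq(1, 3)), (0, Seq(2, 4)))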
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on an inwards path to) the global point of serialization.
* (B)ranch: the agent is on an outwards path from the point of serialization and holds a read-only copy.
* (N)one: the agent holds no permissions on (and no copy of) the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
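// Illustrative sketch (added): an AcquireBlock carrying NtoT asks to grow from None to
// Trunk; the responding Grant caps the requester at toT, and a later Release carrying
// TtoN reports that the Trunk copy has been shrunk back to None.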
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot have negative size.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
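// Illustrative sketch (added): IdRange(4, 8) is half-open, so it covers source IDs 4
// through 7: contains(7) is true, contains(8) is false, and it overlaps IdRange(6, 10).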
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
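// Illustrative sketch (added): TransferSizes(4, 64) accepts any power-of-two transfer of
// 4 to 64 bytes, so contains(16) is true while contains(3) and contains(128) are false,
// and intersect(TransferSizes(32, 256)) yields TransferSizes(32, 64).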
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask gives the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
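// Illustrative sketch (added): AddressSet(0x1000, 0xfff) covers 0x1000-0x1fff, so
// contains(0x1800) is true; widen(0x1000) ignores bit 12 and also admits 0x0000-0x0fff.
// AddressSet.misaligned(0x0, 0x3000) decomposes the request into aligned power-of-two
// sets, here AddressSet(0x0, 0x1fff) and AddressSet(0x2000, 0xfff).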
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
      // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
      // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
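  // Worked example (assuming beatBytes = 8 and maxTransfer of at least 32 bytes):
  // a 32-byte PutFullData decodes to numBeats = 4 and numBeats1 = 3, while any
  // message without data is a single beat (numBeats = 1, numBeats1 = 0).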
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
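  // Note: firstlastHelper instantiates a beat counter per call, so each of the
  // wrappers below (first/last/done/count/addr_inc) carries its own counter state.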
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
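  // Usage sketch (hypothetical `tl` TLBundle): `val (d_first, d_last, d_done) = edge.firstlast(tl.d)`
  // tracks multi-beat bursts on channel D; `d_done` pulses with the final accepted beat.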
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
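  // e.g. Get and PREFETCH_READ hints never need T permissions, whereas Puts, AMOs,
  // PREFETCH_WRITE hints, and NtoT/BtoT Acquires do.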
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
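// Illustrative sketch (not part of the original file): a TileLink client typically
// builds channel-A requests with the edge helpers defined above, e.g.
//   val (legal, get) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
//   tl.a.valid := wantRead && legal
//   tl.a.bits  := get
//   tl.d.ready := true.B
// where `edge`, `tl`, `addr`, and `wantRead` are hypothetical handles in the
// enclosing module.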
| module TLMonitor_8( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [12:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [12:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_27 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_44 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_46 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_50 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_52 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_56 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_58 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_62 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_64 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_68 = 1'h1; // @[Parameters.scala:56:32]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [12:0] _c_first_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74]
wire [12:0] _c_first_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61]
wire [12:0] _c_first_WIRE_2_bits_address = 13'h0; // @[Bundles.scala:265:74]
wire [12:0] _c_first_WIRE_3_bits_address = 13'h0; // @[Bundles.scala:265:61]
wire [12:0] _c_set_wo_ready_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74]
wire [12:0] _c_set_wo_ready_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61]
wire [12:0] _c_set_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74]
wire [12:0] _c_set_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61]
wire [12:0] _c_opcodes_set_interm_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74]
wire [12:0] _c_opcodes_set_interm_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61]
wire [12:0] _c_sizes_set_interm_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74]
wire [12:0] _c_sizes_set_interm_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61]
wire [12:0] _c_opcodes_set_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74]
wire [12:0] _c_opcodes_set_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61]
wire [12:0] _c_sizes_set_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74]
wire [12:0] _c_sizes_set_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61]
wire [12:0] _c_probe_ack_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74]
wire [12:0] _c_probe_ack_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61]
wire [12:0] _c_probe_ack_WIRE_2_bits_address = 13'h0; // @[Bundles.scala:265:74]
wire [12:0] _c_probe_ack_WIRE_3_bits_address = 13'h0; // @[Bundles.scala:265:61]
wire [12:0] _same_cycle_resp_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74]
wire [12:0] _same_cycle_resp_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61]
wire [12:0] _same_cycle_resp_WIRE_2_bits_address = 13'h0; // @[Bundles.scala:265:74]
wire [12:0] _same_cycle_resp_WIRE_3_bits_address = 13'h0; // @[Bundles.scala:265:61]
wire [12:0] _same_cycle_resp_WIRE_4_bits_address = 13'h0; // @[Bundles.scala:265:74]
wire [12:0] _same_cycle_resp_WIRE_5_bits_address = 13'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_wo_ready_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_wo_ready_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [1026:0] _c_opcodes_set_T_1 = 1027'h0; // @[Monitor.scala:767:54]
wire [1026:0] _c_sizes_set_T_1 = 1027'h0; // @[Monitor.scala:768:52]
wire [9:0] _c_opcodes_set_T = 10'h0; // @[Monitor.scala:767:79]
wire [9:0] _c_sizes_set_T = 10'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [127:0] _c_set_wo_ready_T = 128'h1; // @[OneHot.scala:58:35]
wire [127:0] _c_set_T = 128'h1; // @[OneHot.scala:58:35]
wire [259:0] c_opcodes_set = 260'h0; // @[Monitor.scala:740:34]
wire [259:0] c_sizes_set = 260'h0; // @[Monitor.scala:741:34]
wire [64:0] c_set = 65'h0; // @[Monitor.scala:738:34]
wire [64:0] c_set_wo_ready = 65'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [6:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_44 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_48 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_54 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_8 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_9 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_T = io_in_a_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_13 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_19 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_2 = _source_ok_T_1 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_8 = _source_ok_T_7 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_14 = _source_ok_T_13 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_20 = _source_ok_T_19 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[2:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] _source_ok_T_25 = io_in_a_bits_source_0[6:3]; // @[Monitor.scala:36:7]
wire _source_ok_T_26 = _source_ok_T_25 == 4'h4; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_28 = _source_ok_T_26; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_29 = source_ok_uncommonBits_4 < 3'h5; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_30 = _source_ok_T_28 & _source_ok_T_29; // @[Parameters.scala:54:67, :56:48, :57:20]
wire _source_ok_WIRE_5 = _source_ok_T_30; // @[Parameters.scala:1138:31]
wire _source_ok_T_31 = io_in_a_bits_source_0 == 7'h25; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_6 = _source_ok_T_31; // @[Parameters.scala:1138:31]
wire _source_ok_T_32 = io_in_a_bits_source_0 == 7'h28; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_7 = _source_ok_T_32; // @[Parameters.scala:1138:31]
wire _source_ok_T_33 = io_in_a_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_8 = _source_ok_T_33; // @[Parameters.scala:1138:31]
wire _source_ok_T_34 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_35 = _source_ok_T_34 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_36 = _source_ok_T_35 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_37 = _source_ok_T_36 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_38 = _source_ok_T_37 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_39 = _source_ok_T_38 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_40 = _source_ok_T_39 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_40 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46]
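  // source_ok above appears to correspond to `edge.client.contains(bundle.source)` in
  // legalizeFormatA of the Chisel source below: one membership check per client source-ID
  // range, OR-reduced into a single legality bit for the A-channel source field.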
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [12:0] _is_aligned_T = {7'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 13'h0; // @[Edges.scala:21:{16,24}]
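  // is_aligned checks that the low `size` address bits are zero, i.e.
  // `edge.isAligned(bundle.address, bundle.size)` in the Chisel source below.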
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
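  // mask reconstructs the full byte-lane mask for the 8-byte data beat from a one-hot decode
  // of the size field and address bits [2:0]; this appears to be `edge.full_mask(bundle)`
  // used by the A-channel checks in the Chisel source below.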
wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_4 = _uncommonBits_T_4[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_9 = _uncommonBits_T_9[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_14 = _uncommonBits_T_14[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_19 = _uncommonBits_T_19[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_24 = _uncommonBits_T_24[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_29 = _uncommonBits_T_29[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_34 = _uncommonBits_T_34[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_39 = _uncommonBits_T_39[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_44 = _uncommonBits_T_44[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_46 = _uncommonBits_T_46[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_47 = _uncommonBits_T_47[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_48 = _uncommonBits_T_48[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_49 = _uncommonBits_T_49[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_52 = _uncommonBits_T_52[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_53 = _uncommonBits_T_53[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_54 = _uncommonBits_T_54[2:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_41 = io_in_d_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_41; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_42 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_48 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_54 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_60 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_43 = _source_ok_T_42 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_45 = _source_ok_T_43; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_47 = _source_ok_T_45; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_47; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_49 = _source_ok_T_48 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_51 = _source_ok_T_49; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_53 = _source_ok_T_51; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_53; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_55 = _source_ok_T_54 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_57 = _source_ok_T_55; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_59 = _source_ok_T_57; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_59; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_61 = _source_ok_T_60 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_63 = _source_ok_T_61; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_65 = _source_ok_T_63; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_4 = _source_ok_T_65; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[2:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] _source_ok_T_66 = io_in_d_bits_source_0[6:3]; // @[Monitor.scala:36:7]
wire _source_ok_T_67 = _source_ok_T_66 == 4'h4; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_69 = _source_ok_T_67; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_70 = source_ok_uncommonBits_9 < 3'h5; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_71 = _source_ok_T_69 & _source_ok_T_70; // @[Parameters.scala:54:67, :56:48, :57:20]
wire _source_ok_WIRE_1_5 = _source_ok_T_71; // @[Parameters.scala:1138:31]
wire _source_ok_T_72 = io_in_d_bits_source_0 == 7'h25; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_6 = _source_ok_T_72; // @[Parameters.scala:1138:31]
wire _source_ok_T_73 = io_in_d_bits_source_0 == 7'h28; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_7 = _source_ok_T_73; // @[Parameters.scala:1138:31]
wire _source_ok_T_74 = io_in_d_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_8 = _source_ok_T_74; // @[Parameters.scala:1138:31]
wire _source_ok_T_75 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_76 = _source_ok_T_75 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_77 = _source_ok_T_76 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_78 = _source_ok_T_77 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_79 = _source_ok_T_78 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_80 = _source_ok_T_79 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_81 = _source_ok_T_80 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_81 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46]
wire _T_1149 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1149; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1149; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [6:0] source; // @[Monitor.scala:390:22]
reg [12:0] address; // @[Monitor.scala:391:22]
wire _T_1217 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1217; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1217; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1217; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
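  // The a_first_*/d_first_* logic above tracks beats of multi-beat messages (Edges.scala
  // firstlast helpers): *_beats1 is the beat count minus one decoded from the size field
  // (zero for messages without a data payload), and *_first is asserted on the first beat.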
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [6:0] source_1; // @[Monitor.scala:541:22]
reg [64:0] inflight; // @[Monitor.scala:614:27]
reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [259:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [64:0] a_set; // @[Monitor.scala:626:34]
wire [64:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [259:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [259:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [9:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [9:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [9:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [9:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [9:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [9:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [9:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [9:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [9:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [259:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [259:0] _a_opcode_lookup_T_6 = {256'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [259:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [259:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [259:0] _a_size_lookup_T_6 = {256'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [259:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[259:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [127:0] _GEN_2 = 128'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [127:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1082 = _T_1149 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_1082 ? _a_set_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_1082 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_1082 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [9:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [9:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [9:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [1026:0] _a_opcodes_set_T_1 = {1023'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_1082 ? _a_opcodes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [1026:0] _a_sizes_set_T_1 = {1023'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_1082 ? _a_sizes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
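  // a_set / a_opcodes_set / a_sizes_set record, per source ID, the opcode and size of an
  // A-channel request accepted this cycle; together with the d_clr / d_opcodes_clr /
  // d_sizes_clr vectors below they maintain the monitor's inflight bookkeeping
  // (Monitor.scala:614-660) used to cross-check D-channel responses against their requests.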
wire [64:0] d_clr; // @[Monitor.scala:664:34]
wire [64:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [259:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [259:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_1128 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [127:0] _GEN_5 = 128'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1128 & ~d_release_ack ? _d_clr_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1097 = _T_1217 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_1097 ? _d_clr_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_5 = 1039'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_1097 ? _d_opcodes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [1038:0] _d_sizes_clr_T_5 = 1039'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_1097 ? _d_sizes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [64:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [64:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [64:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [259:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [259:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [259:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [259:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [259:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [259:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
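  // watchdog is the 32-bit cycle counter behind the monitor's timeout assertion on
  // outstanding A-channel requests (Monitor.scala:709-714).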
reg [64:0] inflight_1; // @[Monitor.scala:726:35]
wire [64:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [259:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [259:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [259:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [259:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [259:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [259:0] _c_opcode_lookup_T_6 = {256'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [259:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [259:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [259:0] _c_size_lookup_T_6 = {256'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [259:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[259:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [64:0] d_clr_1; // @[Monitor.scala:774:34]
wire [64:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [259:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [259:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1193 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1193 & d_release_ack_1 ? _d_clr_wo_ready_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1175 = _T_1217 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1175 ? _d_clr_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_11 = 1039'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1175 ? _d_opcodes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [1038:0] _d_sizes_clr_T_11 = 1039'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1175 ? _d_sizes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 7'h0; // @[Monitor.scala:36:7, :795:113]
wire [64:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [64:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [259:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [259:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [259:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [259:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
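  // Usage sketch (illustrative): the legalize* methods below call these helpers as, e.g.,
  //   monAssert(TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
  // With monitorDir = MonitorDirection.Monitor both helpers reduce to a plain simulation
  // assert; otherwise they lower to formal Property statements whose assert/assume polarity
  // follows monitorDir (flipped for `assume`).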
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
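  // `visible` is true when every client that could own `source` declares `address` within its
  // visibility set; it backs the "address illegal for the specified bank visibility" checks in
  // the legalize* methods below.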
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
    //The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
    monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
      monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
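  // A sketch of the latch-and-compare check above, with hypothetical beats: on
  // the first beat of a two-beat PutFullData the opcode/param/size/source/address
  // are captured into the registers; if the second beat arrived with a different
  // source, the "'A' channel source changed within multibeat operation" assertion
  // would fire. The same pattern is repeated for the B, C and D channels below.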
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
    monAssert (IfThen(my_resp_pend, !my_a_first_beat),
      "Request message should not be sent with a source ID for which a response message " +
      "is already pending (not received until the current cycle) for a prior request message " +
      "with the same source ID" + extra)
    assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
      "Response message should be accepted with a source ID only if a request message with the " +
      "same source ID has been accepted or is being accepted in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
      "Response message should be sent with a source ID only if a request message with the " +
      "same source ID has been accepted or is being sent in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
      "If d_valid is 1, then d_size should be the same as a_size of the corresponding request " +
      "message" + extra)
    assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
      "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " +
      "request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
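    // Worked example of the bookkeeping encoding used below (illustrative values):
    //   size_to_numfullbits(3.U) = (1.U << 3) - 1.U = "b111"
    // Each source ID owns an (opcode width + 1)-bit slot in inflight_opcodes and a
    // (size width + 1)-bit slot in inflight_sizes, located at bit offset
    // (source << log_a_opcode_bus_size / log_a_size_bus_size); values are stored
    // shifted left by one with the low bit set, so an all-zero slot means
    // "nothing outstanding" rather than "outstanding request with opcode/size 0".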
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
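// Usage sketch (the signal names here are hypothetical): DecoupledHelper lets a
// block gate several ready/valid terms on each other without creating a
// combinational loop, by excluding each interface's own handshake term:
//   val helper = DecoupledHelper(in.valid, out.ready, creditAvailable)
//   in.ready  := helper.fire(in.valid)   // all terms except in.valid
//   out.valid := helper.fire(out.ready)  // all terms except out.ready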
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
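// Usage sketch (key and values are hypothetical): select a pair of signals by
// key, falling back to a default when nothing matches:
//   val (wen, wdata) = MuxTLookup(sel, (false.B, 0.U),
//     Seq(1.U -> (true.B, 0xab.U),
//         2.U -> (true.B, 0xcd.U)))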
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
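// Worked example: Str("ab") packs the ASCII codes MSB-first into 0x6162.U(16.W),
// and Str('A') is 0x41.U(8.W); the UInt/SInt overloads instead render a value as
// a decimal (or radix-n) character string for use in simulation printf output.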
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
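// Worked example: Majority("b1101".U) evaluates to true.B (three of four bits
// set), while Majority("b1001".U) evaluates to false.B; a strict majority of a
// four-bit input requires at least three ones.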
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
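/* Usage sketch (the plusarg name below is hypothetical): bound a watchdog
 * counter with a value chosen at simulation time, e.g. +max_cycles=100000:
 *
 *   val cycles = RegInit(0.U(32.W))
 *   cycles := cycles + 1.U
 *   PlusArg.timeout("max_cycles", default = 0, docstring = "stop after N cycles")(cycles)
 */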
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
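  // Worked example for the non-power-of-2 decode above: a Seq of three elements
  // Seq(a, b, c) is padded to Seq(a, b, c, c) before indexing, so a two-bit index
  // of 3 harmlessly selects the last element instead of needing a bounds check.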
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
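  // Worked examples for the helpers above (illustrative 4-bit values):
  //   "b0011".U(4.W).rotateLeft(1)  == "b0110".U
  //   "b0011".U(4.W).rotateRight(1) == "b1001".U
  //   5.U.addWrap(4.U, 6)           == 3.U   // (5 + 4) % 6, given both inputs < 6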
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
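  // Worked example: for x = "b00100".U(5.W), leftOR(x) = "b11100" (the set bit is
  // smeared toward the MSB) and rightOR(x) = "b00111" (smeared toward the LSB);
  // both need only about log2(width) OR-and-shift steps.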
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
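  // Usage sketch: groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) returns
  // Seq(1 -> List(1, 3), 0 -> List(2, 4)); keys appear in order of first
  // occurrence, which keeps any hardware generated from the result deterministic.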
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A potentially empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
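// Worked example of the UInt containment check above: for IdRange(4, 6), the
// largest bit that differs between 4 ("b100") and 5 ("b101") is bit 0, so the
// hardware comparison reduces to (x >> 1) === 2.U plus range checks on bit 0,
// i.e. it matches exactly x = 4 or x = 5 without a full >=/< comparator pair.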
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
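// Worked examples of the AddressSet helpers above:
//   AddressSet.misaligned(0x1000, 0x3000) ==
//     Seq(AddressSet(0x1000, 0xfff), AddressSet(0x2000, 0x1fff))  // 0x1000-0x1fff and 0x2000-0x3fff
//   AddressSet(0x0, 0xff).subtract(AddressSet(0x0, 0xf)) ==
//     Seq(AddressSet(0x10, 0xf), AddressSet(0x20, 0x1f), AddressSet(0x40, 0x3f), AddressSet(0x80, 0x7f))
//   AddressSet.enumerateBits(0xa) == Seq(0x2, 0x8)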
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
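// For intuition: applying BufferParams.default to a DecoupledIO inserts a two-entry
// Queue (latency 1), BufferParams.flow inserts a single-entry fall-through queue
// (latency 0), and BufferParams.none passes the channel through unchanged.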
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
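// Worked example, assuming beatBytes = 8 and a 32-byte PutFullData burst
// (numBeats1 = 3): `first` is high on beat 0, the counter walks 0 -> 3 -> 2 -> 1,
// `count` steps 0, 1, 2, 3, and `last`/`done` assert together on the fourth beat.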
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_62( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [1:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [10:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [13:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [10:0] io_in_d_bits_source // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire a_first_done = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
reg a_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [1:0] size; // @[Monitor.scala:389:22]
reg [10:0] source; // @[Monitor.scala:390:22]
reg [13:0] address; // @[Monitor.scala:391:22]
reg d_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] size_1; // @[Monitor.scala:540:22]
reg [10:0] source_1; // @[Monitor.scala:541:22]
reg [1039:0] inflight; // @[Monitor.scala:614:27]
reg [4159:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [4159:0] inflight_sizes; // @[Monitor.scala:618:33]
reg a_first_counter_1; // @[Edges.scala:229:27]
reg d_first_counter_1; // @[Edges.scala:229:27]
wire _GEN = a_first_done & ~a_first_counter_1; // @[Decoupled.scala:51:35]
wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46]
wire _GEN_0 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
reg [1039:0] inflight_1; // @[Monitor.scala:726:35]
reg [4159:0] inflight_sizes_1; // @[Monitor.scala:728:35]
reg d_first_counter_2; // @[Edges.scala:229:27]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
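// Example: given some Bool signal din (illustrative name),
// ShiftRegInit(din, n = 3, init = false.B, name = Some("sync")) builds three
// RegNext stages reset to false.B; din feeds the register named sync_2 and the
// returned value is the output of sync_0, i.e. a 3-cycle delayed copy of din.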
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
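// For intuition, with the default AsyncQueueParams(depth = 8): bits = 3, so the
// read/write pointers are 4-bit Gray codes; "full" is detected by comparing the
// write pointer against the synchronized read pointer with its top two bits
// inverted (the `depth | depth >> 1` XOR used below), and wires = 8 means every
// memory entry crosses the clock boundary because narrow = false.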
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
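// For intuition: as the underlying binary counter steps 0, 1, 2, 3, 4 the Gray
// output steps 000, 001, 011, 010, 110, so exactly one bit changes per increment,
// which is what makes the pointer safe to synchronize across the clock crossing.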
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// This is impossible to write as a check here because a dequeue can occur on the
// receiving side, after which reset is allowed to happen, but the write side
// cannot know that the dequeue occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_22( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_34 io_out_source_extend ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in_0), // @[AsyncQueue.scala:58:7]
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
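// Typical usage sketch (signal names are illustrative): gather every ready/valid
// condition once, then derive each handshake by excluding that signal's own term:
//   val helper = DecoupledHelper(in.valid, outA.ready, outB.ready)
//   in.ready   := helper.fire(in.valid)   // all terms except in.valid
//   outA.valid := helper.fire(outA.ready) // all terms except outA.ready
//   outB.valid := helper.fire(outB.ready)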
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
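// For example, Majority(Seq(a, b, c)) elaborates to (a && b) || (a && c) || (b && c):
// every 2-of-3 subset is ANDed and the resulting clauses are ORed together.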
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: addr_lo=0x3, lgSize=0, beatBytes=4 selects byte lane 3 only; lgSize=1 selects lanes 2-3; lgSize=2 selects all four lanes
// groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
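// One more worked example, assuming beatBytes = 8: MaskGen(2.U, 1.U, 8) describes a
// 2-byte access at offset 2, so bits 2 and 3 of the returned mask are set (0x0c).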
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
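// Illustrative sketch (not part of the original source): expected results of
// the string helpers above, following directly from their definitions:
//   "DCacheDataArray".underscore   // => "d_cache_data_array"
//   "Tile Reset Domain".kebab      // => "tile-reset-domain"
//   "TLBuffer".named("sbus")       // => "TLBuffer_named_sbus"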
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
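// Illustrative sketch (not part of the original source): the modular arithmetic
// helpers above with n = 12 (values chosen arbitrarily):
//   9.U.addWrap(7.U, 12)          // => 4.U  ((9 + 7) % 12)
//   3.U.subWrap(7.U, 12)          // => 8.U  ((3 - 7) % 12)
//   "b0001".U(4.W).rotateLeft(1)  // => "b0010".U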
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
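// Illustrative sketch (not part of the original source): the mask/fill helpers
// above on small constants:
//   UIntToOH1(3.U, 8)    // => "b00000111".U  (3 trailing ones)
//   leftOR("b00100".U)   // => "b11100".U     (fill upward from the lowest set bit)
//   rightOR("b00100".U)  // => "b00111".U     (fill downward from the highest set bit)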
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
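// Illustrative sketch (not part of the original source): unlike Seq.groupBy,
// the result preserves the order in which keys are first encountered:
//   groupByIntoSeq(Seq(1, 2, 3, 4, 5))(_ % 2)
//   // => Seq(1 -> Seq(1, 3, 5), 0 -> Seq(2, 4))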
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Metadata.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.constants.MemoryOpConstants
import freechips.rocketchip.util._
object ClientStates {
val width = 2
def Nothing = 0.U(width.W)
def Branch = 1.U(width.W)
def Trunk = 2.U(width.W)
def Dirty = 3.U(width.W)
def hasReadPermission(state: UInt): Bool = state > Nothing
def hasWritePermission(state: UInt): Bool = state > Branch
}
object MemoryOpCategories extends MemoryOpConstants {
def wr = Cat(true.B, true.B) // Op actually writes
def wi = Cat(false.B, true.B) // Future op will write
def rd = Cat(false.B, false.B) // Op only reads
def categorize(cmd: UInt): UInt = {
val cat = Cat(isWrite(cmd), isWriteIntent(cmd))
//assert(cat.isOneOf(wr,wi,rd), "Could not categorize command.")
cat
}
}
/** Stores the client-side coherence information,
* such as permissions on the data and whether the data is dirty.
* Its API can be used to make TileLink messages in response to
* memory operations, cache control operations, or Probe messages.
*/
class ClientMetadata extends Bundle {
/** Actual state information stored in this bundle */
val state = UInt(ClientStates.width.W)
/** Metadata equality */
def ===(rhs: UInt): Bool = state === rhs
def ===(rhs: ClientMetadata): Bool = state === rhs.state
def =/=(rhs: ClientMetadata): Bool = !this.===(rhs)
/** Is the block's data present in this cache */
def isValid(dummy: Int = 0): Bool = state > ClientStates.Nothing
/** Determine whether this cmd misses, and the new state (on hit) or param to be sent (on miss) */
private def growStarter(cmd: UInt): (Bool, UInt) = {
import MemoryOpCategories._
import TLPermissions._
import ClientStates._
val c = categorize(cmd)
MuxTLookup(Cat(c, state), (false.B, 0.U), Seq(
//(effect, am now) -> (was a hit, next)
Cat(rd, Dirty) -> (true.B, Dirty),
Cat(rd, Trunk) -> (true.B, Trunk),
Cat(rd, Branch) -> (true.B, Branch),
Cat(wi, Dirty) -> (true.B, Dirty),
Cat(wi, Trunk) -> (true.B, Trunk),
Cat(wr, Dirty) -> (true.B, Dirty),
Cat(wr, Trunk) -> (true.B, Dirty),
//(effect, am now) -> (was a miss, param)
Cat(rd, Nothing) -> (false.B, NtoB),
Cat(wi, Branch) -> (false.B, BtoT),
Cat(wi, Nothing) -> (false.B, NtoT),
Cat(wr, Branch) -> (false.B, BtoT),
Cat(wr, Nothing) -> (false.B, NtoT)))
}
/** Determine what state to go to after miss based on Grant param
* For now, doesn't depend on state (which may have been Probed).
*/
private def growFinisher(cmd: UInt, param: UInt): UInt = {
import MemoryOpCategories._
import TLPermissions._
import ClientStates._
val c = categorize(cmd)
//assert(c === rd || param === toT, "Client was expecting trunk permissions.")
MuxLookup(Cat(c, param), Nothing)(Seq(
//(effect param) -> (next)
Cat(rd, toB) -> Branch,
Cat(rd, toT) -> Trunk,
Cat(wi, toT) -> Trunk,
Cat(wr, toT) -> Dirty))
}
/** Does this cache have permissions on this block sufficient to perform op,
* and what to do next (Acquire message param or updated metadata). */
def onAccess(cmd: UInt): (Bool, UInt, ClientMetadata) = {
val r = growStarter(cmd)
(r._1, r._2, ClientMetadata(r._2))
}
/** Does a secondary miss on the block require another Acquire message */
def onSecondaryAccess(first_cmd: UInt, second_cmd: UInt): (Bool, Bool, UInt, ClientMetadata, UInt) = {
import MemoryOpCategories._
val r1 = growStarter(first_cmd)
val r2 = growStarter(second_cmd)
val needs_second_acq = isWriteIntent(second_cmd) && !isWriteIntent(first_cmd)
val hit_again = r1._1 && r2._1
val dirties = categorize(second_cmd) === wr
val biggest_grow_param = Mux(dirties, r2._2, r1._2)
val dirtiest_state = ClientMetadata(biggest_grow_param)
val dirtiest_cmd = Mux(dirties, second_cmd, first_cmd)
(needs_second_acq, hit_again, biggest_grow_param, dirtiest_state, dirtiest_cmd)
}
/** Metadata change on a returned Grant */
def onGrant(cmd: UInt, param: UInt): ClientMetadata = ClientMetadata(growFinisher(cmd, param))
/** Determine what state to go to based on Probe param */
private def shrinkHelper(param: UInt): (Bool, UInt, UInt) = {
import ClientStates._
import TLPermissions._
MuxTLookup(Cat(param, state), (false.B, 0.U, 0.U), Seq(
//(wanted, am now) -> (hasDirtyData, resp param, next state)
Cat(toT, Dirty) -> (true.B, TtoT, Trunk),
Cat(toT, Trunk) -> (false.B, TtoT, Trunk),
Cat(toT, Branch) -> (false.B, BtoB, Branch),
Cat(toT, Nothing) -> (false.B, NtoN, Nothing),
Cat(toB, Dirty) -> (true.B, TtoB, Branch),
Cat(toB, Trunk) -> (false.B, TtoB, Branch), // Policy: Don't notify on clean downgrade
Cat(toB, Branch) -> (false.B, BtoB, Branch),
Cat(toB, Nothing) -> (false.B, NtoN, Nothing),
Cat(toN, Dirty) -> (true.B, TtoN, Nothing),
Cat(toN, Trunk) -> (false.B, TtoN, Nothing), // Policy: Don't notify on clean downgrade
Cat(toN, Branch) -> (false.B, BtoN, Nothing), // Policy: Don't notify on clean downgrade
Cat(toN, Nothing) -> (false.B, NtoN, Nothing)))
}
/** Translate cache control cmds into Probe param */
private def cmdToPermCap(cmd: UInt): UInt = {
import MemoryOpCategories._
import TLPermissions._
MuxLookup(cmd, toN)(Seq(
M_FLUSH -> toN,
M_PRODUCE -> toB,
M_CLEAN -> toT))
}
def onCacheControl(cmd: UInt): (Bool, UInt, ClientMetadata) = {
val r = shrinkHelper(cmdToPermCap(cmd))
(r._1, r._2, ClientMetadata(r._3))
}
def onProbe(param: UInt): (Bool, UInt, ClientMetadata) = {
val r = shrinkHelper(param)
(r._1, r._2, ClientMetadata(r._3))
}
}
/** Factories for ClientMetadata, including on reset */
object ClientMetadata {
def apply(perm: UInt) = {
val meta = Wire(new ClientMetadata)
meta.state := perm
meta
}
def onReset = ClientMetadata(ClientStates.Nothing)
def maximum = ClientMetadata(ClientStates.Dirty)
}
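// Illustrative sketch (not part of the original source, and assuming M_XRD and
// TLPermissions.toB are in scope): starting from onReset (Nothing), a read
// misses and requests NtoB; the returned Grant then upgrades the line to Branch:
//   val meta = ClientMetadata.onReset
//   val (is_hit, param, _) = meta.onAccess(M_XRD)            // is_hit = false, param = NtoB
//   val after_grant = meta.onGrant(M_XRD, TLPermissions.toB) // after_grant.state = Branch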
File Consts.scala:
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.rocket.constants
import chisel3._
import chisel3.util._
import freechips.rocketchip.util._
trait ScalarOpConstants {
val SZ_BR = 3
def BR_X = BitPat("b???")
def BR_EQ = 0.U(3.W)
def BR_NE = 1.U(3.W)
def BR_J = 2.U(3.W)
def BR_N = 3.U(3.W)
def BR_LT = 4.U(3.W)
def BR_GE = 5.U(3.W)
def BR_LTU = 6.U(3.W)
def BR_GEU = 7.U(3.W)
def A1_X = BitPat("b??")
def A1_ZERO = 0.U(2.W)
def A1_RS1 = 1.U(2.W)
def A1_PC = 2.U(2.W)
def A1_RS1SHL = 3.U(2.W)
def IMM_X = BitPat("b???")
def IMM_S = 0.U(3.W)
def IMM_SB = 1.U(3.W)
def IMM_U = 2.U(3.W)
def IMM_UJ = 3.U(3.W)
def IMM_I = 4.U(3.W)
def IMM_Z = 5.U(3.W)
def A2_X = BitPat("b???")
def A2_ZERO = 0.U(3.W)
def A2_SIZE = 1.U(3.W)
def A2_RS2 = 2.U(3.W)
def A2_IMM = 3.U(3.W)
def A2_RS2OH = 4.U(3.W)
def A2_IMMOH = 5.U(3.W)
def X = BitPat("b?")
def N = BitPat("b0")
def Y = BitPat("b1")
val SZ_DW = 1
def DW_X = X
def DW_32 = false.B
def DW_64 = true.B
def DW_XPR = DW_64
}
trait MemoryOpConstants {
val NUM_XA_OPS = 9
val M_SZ = 5
def M_X = BitPat("b?????");
def M_XRD = "b00000".U; // int load
def M_XWR = "b00001".U; // int store
def M_PFR = "b00010".U; // prefetch with intent to read
def M_PFW = "b00011".U; // prefetch with intent to write
def M_XA_SWAP = "b00100".U
def M_FLUSH_ALL = "b00101".U // flush all lines
def M_XLR = "b00110".U
def M_XSC = "b00111".U
def M_XA_ADD = "b01000".U
def M_XA_XOR = "b01001".U
def M_XA_OR = "b01010".U
def M_XA_AND = "b01011".U
def M_XA_MIN = "b01100".U
def M_XA_MAX = "b01101".U
def M_XA_MINU = "b01110".U
def M_XA_MAXU = "b01111".U
def M_FLUSH = "b10000".U // write back dirty data and cede R/W permissions
def M_PWR = "b10001".U // partial (masked) store
def M_PRODUCE = "b10010".U // write back dirty data and cede W permissions
def M_CLEAN = "b10011".U // write back dirty data and retain R/W permissions
def M_SFENCE = "b10100".U // SFENCE.VMA
def M_HFENCEV = "b10101".U // HFENCE.VVMA
def M_HFENCEG = "b10110".U // HFENCE.GVMA
def M_WOK = "b10111".U // check write permissions but don't perform a write
def M_HLVX = "b10000".U // HLVX instruction
def isAMOLogical(cmd: UInt) = cmd.isOneOf(M_XA_SWAP, M_XA_XOR, M_XA_OR, M_XA_AND)
def isAMOArithmetic(cmd: UInt) = cmd.isOneOf(M_XA_ADD, M_XA_MIN, M_XA_MAX, M_XA_MINU, M_XA_MAXU)
def isAMO(cmd: UInt) = isAMOLogical(cmd) || isAMOArithmetic(cmd)
def isPrefetch(cmd: UInt) = cmd === M_PFR || cmd === M_PFW
def isRead(cmd: UInt) = cmd.isOneOf(M_XRD, M_HLVX, M_XLR, M_XSC) || isAMO(cmd)
def isWrite(cmd: UInt) = cmd === M_XWR || cmd === M_PWR || cmd === M_XSC || isAMO(cmd)
def isWriteIntent(cmd: UInt) = isWrite(cmd) || cmd === M_PFW || cmd === M_XLR
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A half-open id range [start, end); may be empty when start == end
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
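// Illustrative sketch (not part of the original source): IdRange is half-open,
// so for example:
//   IdRange(4, 8).contains(5)              // true  (ids 4, 5, 6, 7)
//   IdRange(4, 8).contains(8)              // false
//   IdRange(4, 8).overlaps(IdRange(7, 9))  // true
//   IdRange(4, 8).shift(8)                 // IdRange(12, 16)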
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
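// Illustrative sketch (not part of the original source): a few TransferSizes
// identities that follow from the definitions above:
//   TransferSizes(4, 64).contains(16)                       // true (power of 2 within [4, 64])
//   TransferSizes(4, 64).intersect(TransferSizes(32, 256))  // TransferSizes(32, 64)
//   TransferSizes(1, 8).mincover(TransferSizes(32, 64))     // TransferSizes(1, 64)
//   TransferSizes.none.contains(4)                          // false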
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask gives the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
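// Illustrative sketch (not part of the original source): the second example in
// the comment above, base = 0x1000, mask = 0xf0f:
//   val s = AddressSet(0x1000, 0xf0f)
//   s.contains(BigInt(0x1104))  // true:  (0x1104 ^ 0x1000) & ~0xf0f == 0
//   s.contains(BigInt(0x1010))  // false: bit 4 is outside the mask
//   s.alignment                 // 0x10 (each fragment is 16 bytes)
//   s.contiguous                // false (the mask has holes above the alignment)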
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File mshrs.scala:
//******************************************************************************
// Ported from Rocket-Chip
// See LICENSE.Berkeley and LICENSE.SiFive in Rocket-Chip for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v4.lsu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.tile._
import freechips.rocketchip.util._
import freechips.rocketchip.rocket._
import boom.v4.common._
import boom.v4.exu.BrUpdateInfo
import boom.v4.util._
class BoomDCacheReqInternal(implicit p: Parameters) extends BoomDCacheReq()(p)
with HasL1HellaCacheParameters
{
// miss info
val tag_match = Bool()
val old_meta = new L1Metadata
val way_en = UInt(nWays.W)
// Used in the MSHRs
val sdq_id = UInt(log2Ceil(cfg.nSDQ).W)
}
class BoomMSHR(implicit edge: TLEdgeOut, p: Parameters) extends BoomModule()(p)
with HasL1HellaCacheParameters
{
val io = IO(new Bundle {
val id = Input(UInt())
val req_pri_val = Input(Bool())
val req_pri_rdy = Output(Bool())
val req_sec_val = Input(Bool())
val req_sec_rdy = Output(Bool())
val clear_prefetch = Input(Bool())
val brupdate = Input(new BrUpdateInfo)
val exception = Input(Bool())
val rob_pnr_idx = Input(UInt(robAddrSz.W))
val rob_head_idx = Input(UInt(robAddrSz.W))
val req = Input(new BoomDCacheReqInternal)
val req_is_probe = Input(Bool())
val idx = Output(Valid(UInt()))
val way = Output(Valid(UInt()))
val tag = Output(Valid(UInt()))
val mem_acquire = Decoupled(new TLBundleA(edge.bundle))
val mem_grant = Flipped(Decoupled(new TLBundleD(edge.bundle)))
val mem_finish = Decoupled(new TLBundleE(edge.bundle))
val prober_state = Input(Valid(UInt(coreMaxAddrBits.W)))
val refill = Decoupled(new L1DataWriteReq)
val meta_write = Decoupled(new L1MetaWriteReq)
val meta_read = Decoupled(new L1MetaReadReq)
val meta_resp = Input(Valid(new L1Metadata))
val wb_req = Decoupled(new WritebackReq(edge.bundle))
// To inform the prefetcher when we are committing the fetch of this line
val commit_val = Output(Bool())
val commit_addr = Output(UInt(coreMaxAddrBits.W))
val commit_coh = Output(new ClientMetadata)
// Reading from the line buffer
val lb_read = Output(new LineBufferReadReq)
val lb_resp = Input(UInt(encRowBits.W))
val lb_write = Output(Valid(new LineBufferWriteReq))
// Replays go through the cache pipeline again
val replay = Decoupled(new BoomDCacheReqInternal)
// Resps go straight out to the core
val resp = Decoupled(new BoomDCacheResp)
// Writeback unit tells us when it is done processing our wb
val wb_resp = Input(Bool())
val probe_rdy = Output(Bool())
})
// TODO: Optimize this. We don't want to mess with cache during speculation
// s_refill_req : Make a request for a new cache line
// s_refill_resp : Store the refill response into our buffer
// s_drain_rpq_loads : Drain out loads from the rpq
// : If miss was misspeculated, go to s_invalid
// s_wb_req : Write back the evicted cache line
// s_wb_resp : Finish writing back the evicted cache line
// s_meta_write_req : Write the metadata for the new cache line
// s_meta_write_resp :
val s_invalid :: s_refill_req :: s_refill_resp :: s_drain_rpq_loads :: s_meta_read :: s_meta_resp_1 :: s_meta_resp_2 :: s_meta_clear :: s_wb_meta_read :: s_wb_req :: s_wb_resp :: s_commit_line :: s_drain_rpq :: s_meta_write_req :: s_mem_finish_1 :: s_mem_finish_2 :: s_prefetched :: s_prefetch :: Nil = Enum(18)
val state = RegInit(s_invalid)
val req = Reg(new BoomDCacheReqInternal)
val req_idx = req.addr(untagBits-1, blockOffBits)
val req_tag = req.addr >> untagBits
val req_block_addr = (req.addr >> blockOffBits) << blockOffBits
val req_needs_wb = RegInit(false.B)
val new_coh = RegInit(ClientMetadata.onReset)
val (_, shrink_param, coh_on_clear) = req.old_meta.coh.onCacheControl(M_FLUSH)
val grow_param = new_coh.onAccess(req.uop.mem_cmd)._2
val coh_on_grant = new_coh.onGrant(req.uop.mem_cmd, io.mem_grant.bits.param)
// We only accept secondary misses if the original request had sufficient permissions
val (cmd_requires_second_acquire, is_hit_again, _, dirtier_coh, dirtier_cmd) =
new_coh.onSecondaryAccess(req.uop.mem_cmd, io.req.uop.mem_cmd)
val (_, _, refill_done, refill_address_inc) = edge.addr_inc(io.mem_grant)
val sec_rdy = (!cmd_requires_second_acquire && !io.req_is_probe &&
!state.isOneOf(s_invalid, s_meta_write_req, s_mem_finish_1, s_mem_finish_2))// Always accept secondary misses
val rpq = Module(new BranchKillableQueue(new BoomDCacheReqInternal, cfg.nRPQ, u => u.uses_ldq, fastDeq=true))
rpq.io.brupdate := io.brupdate
rpq.io.flush := io.exception
assert(!(state === s_invalid && !rpq.io.empty))
rpq.io.enq.valid := ((io.req_pri_val && io.req_pri_rdy) || (io.req_sec_val && io.req_sec_rdy)) && !isPrefetch(io.req.uop.mem_cmd)
rpq.io.enq.bits := io.req
rpq.io.deq.ready := false.B
val grantack = Reg(Valid(new TLBundleE(edge.bundle)))
val refill_ctr = Reg(UInt(log2Ceil(cacheDataBeats).W))
val commit_line = Reg(Bool())
val grant_had_data = Reg(Bool())
val finish_to_prefetch = Reg(Bool())
// Block probes if a tag write we started is still in the pipeline
val meta_hazard = RegInit(0.U(2.W))
when (meta_hazard =/= 0.U) { meta_hazard := meta_hazard + 1.U }
when (io.meta_write.fire) { meta_hazard := 1.U }
io.probe_rdy := (meta_hazard === 0.U && (state.isOneOf(s_invalid, s_refill_req, s_refill_resp, s_drain_rpq_loads) || (state === s_meta_read && grantack.valid)))
io.idx.valid := state =/= s_invalid
io.tag.valid := state =/= s_invalid
io.way.valid := !state.isOneOf(s_invalid, s_prefetch)
io.idx.bits := req_idx
io.tag.bits := req_tag
io.way.bits := req.way_en
io.meta_write.valid := false.B
io.meta_write.bits.idx := req_idx
io.meta_write.bits.data.coh := coh_on_clear
io.meta_write.bits.data.tag := req_tag
io.meta_write.bits.way_en := req.way_en
io.meta_write.bits.tag := req_tag
io.req_pri_rdy := false.B
io.req_sec_rdy := sec_rdy && rpq.io.enq.ready
io.mem_acquire.valid := false.B
// TODO: Use AcquirePerm if just doing permissions acquire
io.mem_acquire.bits := edge.AcquireBlock(
fromSource = io.id,
toAddress = Cat(req_tag, req_idx) << blockOffBits,
lgSize = lgCacheBlockBytes.U,
growPermissions = grow_param)._2
io.refill.valid := false.B
io.refill.bits.addr := req_block_addr | (refill_ctr << rowOffBits)
io.refill.bits.way_en := req.way_en
io.refill.bits.wmask := ~(0.U(rowWords.W))
io.refill.bits.data := io.lb_resp
io.replay.valid := false.B
io.replay.bits := rpq.io.deq.bits
io.wb_req.valid := false.B
io.wb_req.bits.tag := req.old_meta.tag
io.wb_req.bits.idx := req_idx
io.wb_req.bits.param := shrink_param
io.wb_req.bits.way_en := req.way_en
io.wb_req.bits.source := io.id
io.wb_req.bits.voluntary := true.B
io.resp.valid := false.B
io.resp.bits := rpq.io.deq.bits
io.commit_val := false.B
io.commit_addr := req.addr
io.commit_coh := coh_on_grant
io.meta_read.valid := false.B
io.meta_read.bits.idx := req_idx
io.meta_read.bits.tag := req_tag
io.meta_read.bits.way_en := req.way_en
io.mem_finish.valid := false.B
io.mem_finish.bits := grantack.bits
io.lb_write.valid := false.B
io.lb_write.bits.offset := refill_address_inc >> rowOffBits
io.lb_write.bits.data := io.mem_grant.bits.data
io.mem_grant.ready := false.B
io.lb_read.offset := rpq.io.deq.bits.addr >> rowOffBits
when (io.req_sec_val && io.req_sec_rdy) {
req.uop.mem_cmd := dirtier_cmd
when (is_hit_again) {
new_coh := dirtier_coh
}
}
def handle_pri_req(old_state: UInt): UInt = {
val new_state = WireInit(old_state)
grantack.valid := false.B
refill_ctr := 0.U
assert(rpq.io.enq.ready)
req := io.req
val old_coh = io.req.old_meta.coh
req_needs_wb := old_coh.onCacheControl(M_FLUSH)._1 // does the line we are evicting need to be written back
when (io.req.tag_match) {
val (is_hit, _, coh_on_hit) = old_coh.onAccess(io.req.uop.mem_cmd)
when (is_hit) { // set dirty bit
assert(isWrite(io.req.uop.mem_cmd))
new_coh := coh_on_hit
new_state := s_drain_rpq
} .otherwise { // upgrade permissions
new_coh := old_coh
new_state := s_refill_req
}
} .otherwise { // refill and writeback if necessary
new_coh := ClientMetadata.onReset
new_state := s_refill_req
}
new_state
}
when (state === s_invalid) {
io.req_pri_rdy := true.B
grant_had_data := false.B
when (io.req_pri_val && io.req_pri_rdy) {
state := handle_pri_req(state)
}
} .elsewhen (state === s_refill_req) {
io.mem_acquire.valid := true.B
when (io.mem_acquire.fire) {
state := s_refill_resp
}
} .elsewhen (state === s_refill_resp) {
io.mem_grant.ready := true.B
when (edge.hasData(io.mem_grant.bits)) {
io.lb_write.valid := io.mem_grant.valid
} .otherwise {
io.mem_grant.ready := true.B
}
when (io.mem_grant.fire) {
grant_had_data := edge.hasData(io.mem_grant.bits)
}
when (refill_done) {
grantack.valid := edge.isRequest(io.mem_grant.bits)
grantack.bits := edge.GrantAck(io.mem_grant.bits)
state := Mux(grant_had_data, s_drain_rpq_loads, s_drain_rpq)
assert(!(!grant_had_data && req_needs_wb))
commit_line := false.B
new_coh := coh_on_grant
}
} .elsewhen (state === s_drain_rpq_loads) {
val drain_load = (isRead(rpq.io.deq.bits.uop.mem_cmd) &&
!isWrite(rpq.io.deq.bits.uop.mem_cmd) &&
(rpq.io.deq.bits.uop.mem_cmd =/= M_XLR)) // LR should go through replay
// drain all loads for now
val rp_addr = Cat(req_tag, req_idx, rpq.io.deq.bits.addr(blockOffBits-1,0))
val word_idx = if (rowWords == 1) 0.U else rp_addr(log2Up(rowWords*coreDataBytes)-1, log2Up(wordBytes))
val data = io.lb_resp
val data_word = data >> Cat(word_idx, 0.U(log2Up(coreDataBits).W))
val loadgen = new LoadGen(rpq.io.deq.bits.uop.mem_size, rpq.io.deq.bits.uop.mem_signed,
Cat(req_tag, req_idx, rpq.io.deq.bits.addr(blockOffBits-1,0)),
data_word, false.B, wordBytes)
rpq.io.deq.ready := io.resp.ready && drain_load
io.lb_read.offset := rpq.io.deq.bits.addr >> rowOffBits
io.resp.valid := rpq.io.deq.valid && drain_load
io.resp.bits.data := loadgen.data
io.resp.bits.is_hella := rpq.io.deq.bits.is_hella
when (rpq.io.deq.fire) {
commit_line := true.B
}
.elsewhen (rpq.io.empty && !commit_line)
{
when (!rpq.io.enq.fire) {
state := s_mem_finish_1
finish_to_prefetch := enablePrefetching.B
}
} .elsewhen (rpq.io.empty || (rpq.io.deq.valid && !drain_load)) {
// io.commit_val is for the prefetcher. It tells the prefetcher that this line was correctly acquired
// The prefetcher should consider fetching the next line
io.commit_val := true.B
state := s_meta_read
}
} .elsewhen (state === s_meta_read) {
io.meta_read.valid := !io.prober_state.valid || !grantack.valid || (io.prober_state.bits(untagBits-1,blockOffBits) =/= req_idx)
when (io.meta_read.fire) {
state := s_meta_resp_1
}
} .elsewhen (state === s_meta_resp_1) {
state := s_meta_resp_2
} .elsewhen (state === s_meta_resp_2) {
val needs_wb = io.meta_resp.bits.coh.onCacheControl(M_FLUSH)._1
state := Mux(!io.meta_resp.valid, s_meta_read, // Prober could have nack'd this read
Mux(needs_wb, s_meta_clear, s_commit_line))
} .elsewhen (state === s_meta_clear) {
io.meta_write.valid := true.B
when (io.meta_write.fire) {
state := s_wb_req
}
} .elsewhen (state === s_wb_req) {
io.wb_req.valid := true.B
when (io.wb_req.fire) {
state := s_wb_resp
}
} .elsewhen (state === s_wb_resp) {
when (io.wb_resp) {
state := s_commit_line
}
} .elsewhen (state === s_commit_line) {
io.lb_read.offset := refill_ctr
io.refill.valid := true.B
when (io.refill.fire) {
refill_ctr := refill_ctr + 1.U
when (refill_ctr === (cacheDataBeats - 1).U) {
state := s_drain_rpq
}
}
} .elsewhen (state === s_drain_rpq) {
io.replay <> rpq.io.deq
io.replay.bits.way_en := req.way_en
io.replay.bits.addr := Cat(req_tag, req_idx, rpq.io.deq.bits.addr(blockOffBits-1,0))
when (io.replay.fire && isWrite(rpq.io.deq.bits.uop.mem_cmd)) {
// Set dirty bit
val (is_hit, _, coh_on_hit) = new_coh.onAccess(rpq.io.deq.bits.uop.mem_cmd)
assert(is_hit, "We still don't have permissions for this store")
new_coh := coh_on_hit
}
when (rpq.io.empty && !rpq.io.enq.valid) {
state := s_meta_write_req
}
} .elsewhen (state === s_meta_write_req) {
io.meta_write.valid := true.B
io.meta_write.bits.idx := req_idx
io.meta_write.bits.data.coh := new_coh
io.meta_write.bits.data.tag := req_tag
io.meta_write.bits.way_en := req.way_en
when (io.meta_write.fire) {
state := s_mem_finish_1
finish_to_prefetch := false.B
}
} .elsewhen (state === s_mem_finish_1) {
io.mem_finish.valid := grantack.valid
when (io.mem_finish.fire || !grantack.valid) {
grantack.valid := false.B
state := s_mem_finish_2
}
} .elsewhen (state === s_mem_finish_2) {
state := Mux(finish_to_prefetch, s_prefetch, s_invalid)
} .elsewhen (state === s_prefetch) {
io.req_pri_rdy := true.B
when ((io.req_sec_val && !io.req_sec_rdy) || io.clear_prefetch) {
state := s_invalid
} .elsewhen (io.req_sec_val && io.req_sec_rdy) {
val (is_hit, _, coh_on_hit) = new_coh.onAccess(io.req.uop.mem_cmd)
when (is_hit) { // Proceed with refill
new_coh := coh_on_hit
state := s_meta_read
} .otherwise { // Reacquire this line
new_coh := ClientMetadata.onReset
state := s_refill_req
}
} .elsewhen (io.req_pri_val && io.req_pri_rdy) {
grant_had_data := false.B
state := handle_pri_req(state)
}
}
}
class BoomIOMSHR(id: Int)(implicit edge: TLEdgeOut, p: Parameters) extends BoomModule()(p)
with HasL1HellaCacheParameters
{
val io = IO(new Bundle {
val req = Flipped(Decoupled(new BoomDCacheReq))
val resp = Decoupled(new BoomDCacheResp)
val mem_access = Decoupled(new TLBundleA(edge.bundle))
val mem_ack = Flipped(Valid(new TLBundleD(edge.bundle)))
// We don't need brupdate in here because uncacheable operations are guaranteed non-speculative
})
def beatOffset(addr: UInt) = addr.extract(beatOffBits-1, wordOffBits)
def wordFromBeat(addr: UInt, dat: UInt) = {
val shift = Cat(beatOffset(addr), 0.U((wordOffBits+log2Ceil(wordBytes)).W))
(dat >> shift)(wordBits-1, 0)
}
val req = Reg(new BoomDCacheReq)
val grant_word = Reg(UInt(wordBits.W))
val s_idle :: s_mem_access :: s_mem_ack :: s_resp :: Nil = Enum(4)
val state = RegInit(s_idle)
io.req.ready := state === s_idle
val loadgen = new LoadGen(req.uop.mem_size, req.uop.mem_signed, req.addr, grant_word, false.B, wordBytes)
val a_source = id.U
val a_address = req.addr
val a_size = req.uop.mem_size
val a_data = Fill(beatWords, req.data)
val get = edge.Get(a_source, a_address, a_size)._2
val put = edge.Put(a_source, a_address, a_size, a_data)._2
val atomics = if (edge.manager.anySupportLogical) {
MuxLookup(req.uop.mem_cmd, (0.U).asTypeOf(new TLBundleA(edge.bundle)))(Array(
M_XA_SWAP -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.SWAP)._2,
M_XA_XOR -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.XOR) ._2,
M_XA_OR -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.OR) ._2,
M_XA_AND -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.AND) ._2,
M_XA_ADD -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.ADD)._2,
M_XA_MIN -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MIN)._2,
M_XA_MAX -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MAX)._2,
M_XA_MINU -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MINU)._2,
M_XA_MAXU -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MAXU)._2))
} else {
// If no managers support atomics, raise an assertion failure if the processor asks for them
assert(state === s_idle || !isAMO(req.uop.mem_cmd))
(0.U).asTypeOf(new TLBundleA(edge.bundle))
}
assert(state === s_idle || req.uop.mem_cmd =/= M_XSC)
io.mem_access.valid := state === s_mem_access
io.mem_access.bits := Mux(isAMO(req.uop.mem_cmd), atomics, Mux(isRead(req.uop.mem_cmd), get, put))
val send_resp = isRead(req.uop.mem_cmd)
io.resp.valid := (state === s_resp) && send_resp
io.resp.bits.uop := req.uop
io.resp.bits.data := loadgen.data
io.resp.bits.is_hella := req.is_hella
when (io.req.fire) {
req := io.req.bits
state := s_mem_access
}
when (io.mem_access.fire) {
state := s_mem_ack
}
when (state === s_mem_ack && io.mem_ack.valid) {
state := s_resp
when (isRead(req.uop.mem_cmd)) {
grant_word := wordFromBeat(req.addr, io.mem_ack.bits.data)
}
}
when (state === s_resp) {
when (!send_resp || io.resp.fire) {
state := s_idle
}
}
}
class LineBufferReadReq(implicit p: Parameters) extends BoomBundle()(p)
with HasL1HellaCacheParameters
{
val offset = UInt(log2Ceil(cacheDataBeats).W)
}
class LineBufferWriteReq(implicit p: Parameters) extends LineBufferReadReq()(p)
{
val data = UInt(encRowBits.W)
}
class LineBufferMetaWriteReq(implicit p: Parameters) extends BoomBundle()(p)
{
val id = UInt(log2Ceil(nLBEntries).W)
val coh = new ClientMetadata
val addr = UInt(coreMaxAddrBits.W)
}
class LineBufferMeta(implicit p: Parameters) extends BoomBundle()(p)
with HasL1HellaCacheParameters
{
val coh = new ClientMetadata
val addr = UInt(coreMaxAddrBits.W)
}
class BoomMSHRFile(implicit edge: TLEdgeOut, p: Parameters) extends BoomModule()(p)
with HasL1HellaCacheParameters
{
val io = IO(new Bundle {
val req = Flipped(Vec(lsuWidth, Decoupled(new BoomDCacheReqInternal))) // Req from s2 of DCache pipe
val req_is_probe = Input(Vec(lsuWidth, Bool()))
val resp = Decoupled(new BoomDCacheResp)
val secondary_miss = Output(Vec(lsuWidth, Bool()))
val block_hit = Output(Vec(lsuWidth, Bool()))
val brupdate = Input(new BrUpdateInfo)
val exception = Input(Bool())
val rob_pnr_idx = Input(UInt(robAddrSz.W))
val rob_head_idx = Input(UInt(robAddrSz.W))
val mem_acquire = Decoupled(new TLBundleA(edge.bundle))
val mem_grant = Flipped(Decoupled(new TLBundleD(edge.bundle)))
val mem_finish = Decoupled(new TLBundleE(edge.bundle))
val refill = Decoupled(new L1DataWriteReq)
val meta_write = Decoupled(new L1MetaWriteReq)
val meta_read = Decoupled(new L1MetaReadReq)
val meta_resp = Input(Valid(new L1Metadata))
val replay = Decoupled(new BoomDCacheReqInternal)
val prefetch = Decoupled(new BoomDCacheReq)
val wb_req = Decoupled(new WritebackReq(edge.bundle))
val prober_state = Input(Valid(UInt(coreMaxAddrBits.W)))
val clear_all = Input(Bool()) // Clears all uncommitted MSHRs to prepare for fence
val wb_resp = Input(Bool())
val fence_rdy = Output(Bool())
val probe_rdy = Output(Bool())
})
val req_idx = OHToUInt(io.req.map(_.valid))
val req = WireInit(io.req(req_idx))
val req_is_probe = io.req_is_probe(0)
for (w <- 0 until lsuWidth)
io.req(w).ready := false.B
val prefetcher: DataPrefetcher = if (enablePrefetching) Module(new NLPrefetcher)
else Module(new NullPrefetcher)
io.prefetch <> prefetcher.io.prefetch
val cacheable = edge.manager.supportsAcquireBFast(req.bits.addr, lgCacheBlockBytes.U)
// --------------------
// The MSHR SDQ
val sdq_val = RegInit(0.U(cfg.nSDQ.W))
val sdq_alloc_id = PriorityEncoder(~sdq_val(cfg.nSDQ-1,0))
val sdq_rdy = !sdq_val.andR
val sdq_enq = req.fire && cacheable && isWrite(req.bits.uop.mem_cmd)
val sdq = Mem(cfg.nSDQ, UInt(coreDataBits.W))
when (sdq_enq) {
sdq(sdq_alloc_id) := req.bits.data
}
// --------------------
// The LineBuffer Data
// Holds refilling lines, prefetched lines
val lb = Reg(Vec(nLBEntries, Vec(cacheDataBeats, UInt(encRowBits.W))))
def widthMap[T <: Data](f: Int => T) = VecInit((0 until lsuWidth).map(f))
val idx_matches = Wire(Vec(lsuWidth, Vec(cfg.nMSHRs, Bool())))
val tag_matches = Wire(Vec(lsuWidth, Vec(cfg.nMSHRs, Bool())))
val way_matches = Wire(Vec(lsuWidth, Vec(cfg.nMSHRs, Bool())))
val tag_match = widthMap(w => Mux1H(idx_matches(w), tag_matches(w)))
val idx_match = widthMap(w => idx_matches(w).reduce(_||_))
val way_match = widthMap(w => Mux1H(idx_matches(w), way_matches(w)))
val wb_tag_list = Wire(Vec(cfg.nMSHRs, UInt(tagBits.W)))
val meta_write_arb = Module(new Arbiter(new L1MetaWriteReq , cfg.nMSHRs))
val meta_read_arb = Module(new Arbiter(new L1MetaReadReq , cfg.nMSHRs))
val wb_req_arb = Module(new Arbiter(new WritebackReq(edge.bundle), cfg.nMSHRs))
val replay_arb = Module(new Arbiter(new BoomDCacheReqInternal , cfg.nMSHRs))
val resp_arb = Module(new Arbiter(new BoomDCacheResp , cfg.nMSHRs + nIOMSHRs))
val refill_arb = Module(new Arbiter(new L1DataWriteReq , cfg.nMSHRs))
val commit_vals = Wire(Vec(cfg.nMSHRs, Bool()))
val commit_addrs = Wire(Vec(cfg.nMSHRs, UInt(coreMaxAddrBits.W)))
val commit_cohs = Wire(Vec(cfg.nMSHRs, new ClientMetadata))
var sec_rdy = false.B
io.fence_rdy := true.B
io.probe_rdy := true.B
io.mem_grant.ready := false.B
val mshr_alloc_idx = Wire(UInt())
val pri_rdy = WireInit(false.B)
val pri_val = req.valid && sdq_rdy && cacheable && !idx_match(req_idx)
val mshrs = (0 until cfg.nMSHRs) map { i =>
val mshr = Module(new BoomMSHR)
mshr.io.id := i.U(log2Ceil(cfg.nMSHRs).W)
for (w <- 0 until lsuWidth) {
idx_matches(w)(i) := mshr.io.idx.valid && mshr.io.idx.bits === io.req(w).bits.addr(untagBits-1,blockOffBits)
tag_matches(w)(i) := mshr.io.tag.valid && mshr.io.tag.bits === io.req(w).bits.addr >> untagBits
way_matches(w)(i) := mshr.io.way.valid && mshr.io.way.bits === io.req(w).bits.way_en
}
wb_tag_list(i) := mshr.io.wb_req.bits.tag
mshr.io.req_pri_val := (i.U === mshr_alloc_idx) && pri_val
when (i.U === mshr_alloc_idx) {
pri_rdy := mshr.io.req_pri_rdy
}
mshr.io.req_sec_val := req.valid && sdq_rdy && tag_match(req_idx) && idx_matches(req_idx)(i) && cacheable
mshr.io.req := req.bits
mshr.io.req_is_probe := req_is_probe
mshr.io.req.sdq_id := sdq_alloc_id
// Clear because of a FENCE, a request to the same idx as a prefetched line,
// a probe to that prefetched line, or because all MSHRs are in use
mshr.io.clear_prefetch := ((io.clear_all && !req.valid)||
(req.valid && idx_matches(req_idx)(i) && cacheable && !tag_match(req_idx)) ||
(req_is_probe && idx_matches(req_idx)(i)))
mshr.io.brupdate := io.brupdate
mshr.io.exception := io.exception
mshr.io.rob_pnr_idx := io.rob_pnr_idx
mshr.io.rob_head_idx := io.rob_head_idx
mshr.io.prober_state := io.prober_state
mshr.io.wb_resp := io.wb_resp
meta_write_arb.io.in(i) <> mshr.io.meta_write
meta_read_arb.io.in(i) <> mshr.io.meta_read
mshr.io.meta_resp := io.meta_resp
wb_req_arb.io.in(i) <> mshr.io.wb_req
replay_arb.io.in(i) <> mshr.io.replay
refill_arb.io.in(i) <> mshr.io.refill
mshr.io.lb_resp := lb(i)(mshr.io.lb_read.offset)
when (mshr.io.lb_write.valid) {
lb(i)(mshr.io.lb_write.bits.offset) := mshr.io.lb_write.bits.data
}
commit_vals(i) := mshr.io.commit_val
commit_addrs(i) := mshr.io.commit_addr
commit_cohs(i) := mshr.io.commit_coh
mshr.io.mem_grant.valid := false.B
mshr.io.mem_grant.bits := DontCare
when (io.mem_grant.bits.source === i.U) {
mshr.io.mem_grant <> io.mem_grant
}
sec_rdy = sec_rdy || (mshr.io.req_sec_rdy && mshr.io.req_sec_val)
resp_arb.io.in(i) <> mshr.io.resp
when (!mshr.io.req_pri_rdy) {
io.fence_rdy := false.B
}
for (w <- 0 until lsuWidth) {
when (!mshr.io.probe_rdy && idx_matches(w)(i) && io.req_is_probe(w)) {
io.probe_rdy := false.B
}
}
mshr
}
// Try to round-robin the MSHRs
val mshr_head = RegInit(0.U(log2Ceil(cfg.nMSHRs).W))
mshr_alloc_idx := RegNext(AgePriorityEncoder(mshrs.map(m=>m.io.req_pri_rdy), mshr_head))
when (pri_rdy && pri_val) { mshr_head := WrapInc(mshr_head, cfg.nMSHRs) }
io.meta_write <> meta_write_arb.io.out
io.meta_read <> meta_read_arb.io.out
io.wb_req <> wb_req_arb.io.out
val mmio_alloc_arb = Module(new Arbiter(Bool(), nIOMSHRs))
var mmio_rdy = false.B
val mmios = (0 until nIOMSHRs) map { i =>
val id = cfg.nMSHRs + 1 + i // +1 for wb unit
val mshr = Module(new BoomIOMSHR(id))
mmio_alloc_arb.io.in(i).valid := mshr.io.req.ready
mmio_alloc_arb.io.in(i).bits := DontCare
mshr.io.req.valid := mmio_alloc_arb.io.in(i).ready
mshr.io.req.bits := req.bits
mmio_rdy = mmio_rdy || mshr.io.req.ready
mshr.io.mem_ack.bits := io.mem_grant.bits
mshr.io.mem_ack.valid := io.mem_grant.valid && io.mem_grant.bits.source === id.U
when (io.mem_grant.bits.source === id.U) {
io.mem_grant.ready := true.B
}
resp_arb.io.in(cfg.nMSHRs + i) <> mshr.io.resp
when (!mshr.io.req.ready) {
io.fence_rdy := false.B
}
mshr
}
mmio_alloc_arb.io.out.ready := req.valid && !cacheable
TLArbiter.lowestFromSeq(edge, io.mem_acquire, mshrs.map(_.io.mem_acquire) ++ mmios.map(_.io.mem_access))
TLArbiter.lowestFromSeq(edge, io.mem_finish, mshrs.map(_.io.mem_finish))
val respq = Module(new BranchKillableQueue(new BoomDCacheResp, 4, u => u.uses_ldq))
respq.io.brupdate := io.brupdate
respq.io.flush := io.exception
respq.io.enq <> resp_arb.io.out
io.resp <> respq.io.deq
for (w <- 0 until lsuWidth) {
io.req(w).ready := (w.U === req_idx) &&
Mux(!cacheable, mmio_rdy, sdq_rdy && Mux(idx_match(w), tag_match(w) && sec_rdy, pri_rdy))
io.secondary_miss(w) := idx_match(w) && way_match(w) && !tag_match(w)
io.block_hit(w) := idx_match(w) && tag_match(w)
}
io.refill <> refill_arb.io.out
val free_sdq = io.replay.fire && isWrite(io.replay.bits.uop.mem_cmd)
io.replay <> replay_arb.io.out
io.replay.bits.data := sdq(replay_arb.io.out.bits.sdq_id)
when (io.replay.valid || sdq_enq) {
sdq_val := sdq_val & ~(UIntToOH(replay_arb.io.out.bits.sdq_id) & Fill(cfg.nSDQ, free_sdq)) |
PriorityEncoderOH(~sdq_val(cfg.nSDQ-1,0)) & Fill(cfg.nSDQ, sdq_enq)
}
prefetcher.io.mshr_avail := RegNext(pri_rdy)
prefetcher.io.req_val := RegNext(commit_vals.reduce(_||_))
prefetcher.io.req_addr := RegNext(Mux1H(commit_vals, commit_addrs))
prefetcher.io.req_coh := RegNext(Mux1H(commit_vals, commit_cohs))
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
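  // Worked example (informal): with manager.beatBytes = 8, a 32-byte
  // PutFullData has size = 5, so UIntToOH1(5, maxLgSize) = 0x1f and
  // numBeats1 = 0x1f >> 3 = 3 (a 4-beat burst); the matching 32-byte Get
  // returns 0 because the request itself carries no data. numBeats reports
  // the same burst as 4 beats, and as 1 beat for the Get.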
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
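  // Hedged usage sketch (names below are illustrative, not part of this file):
  // tracking message boundaries on the D channel of a monitored bundle `tl`:
  //   val (d_first, d_last, d_done) = edge.firstlast(tl.d)
  //   when (tl.d.fire && d_first) { /* capture per-message state */ }
  //   when (d_done) { /* the whole burst has been accepted */ }
  // Note that every call to firstlastHelper instantiates its own beat counter,
  // so compute first/last/done/count once per channel and reuse the results.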
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
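  // Example (informal): an AcquireBlock with param NtoB (a read intending only
  // Branch permissions) does not need T, while NtoT or BtoT does; likewise a
  // Hint with PREFETCH_WRITE needs T but PREFETCH_READ does not, and all Puts
  // and AMOs unconditionally require it.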
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
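  // Hedged usage sketch (illustrative names): gating a quiescence check on a
  // monitored bundle `tl`:
  //   val (in_flight, next_in_flight) = edge.inFlight(tl)
  //   val quiescent = in_flight === 0.U
  // As warned above, this builds first/last trackers for all five channels
  // plus PopCount-based increment/decrement logic, so use it sparingly.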
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
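  // Hedged usage sketch of the (legal, bits) pattern shared by these
  // constructors (client side; `node`, `src`, `addr` and `want_read` are
  // illustrative names, not defined in this file):
  //   val (tl, edge) = node.out(0)
  //   val (legal, get) = edge.Get(fromSource = src, toAddress = addr, lgSize = 3.U)
  //   tl.a.valid := want_read && legal
  //   tl.a.bits := get
  // `legal` only reflects what the visible managers support at that address
  // and size; alignment is checked separately (e.g. via the edge's isAligned).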
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
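  // Hedged usage sketch (manager side; `tl`, `is_get` and `read_data` are
  // illustrative): answering a Get on channel A with data on channel D:
  //   tl.d.valid := tl.a.valid && is_get
  //   tl.d.bits := edge.AccessAck(tl.a.bits, read_data)
  //   tl.a.ready := tl.d.ready
  // The TLBundleA overloads above pick up source and size from the request,
  // so only the payload (and optionally denied/corrupt) is supplied here.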
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
File AMOALU.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
class StoreGen(typ: UInt, addr: UInt, dat: UInt, maxSize: Int) {
val size = Wire(UInt(log2Up(log2Up(maxSize)+1).W))
size := typ
val dat_padded = dat.pad(maxSize*8)
def misaligned: Bool =
(addr & ((1.U << size) - 1.U)(log2Up(maxSize)-1,0)).orR
def mask = {
var res = 1.U
for (i <- 0 until log2Up(maxSize)) {
val upper = Mux(addr(i), res, 0.U) | Mux(size >= (i+1).U, ((BigInt(1) << (1 << i))-1).U, 0.U)
val lower = Mux(addr(i), 0.U, res)
res = Cat(upper, lower)
}
res
}
protected def genData(i: Int): UInt =
if (i >= log2Up(maxSize)) dat_padded
else Mux(size === i.U, Fill(1 << (log2Up(maxSize)-i), dat_padded((8 << i)-1,0)), genData(i+1))
def data = genData(0)
def wordData = genData(2)
}
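// Worked example (informal): with maxSize = 8, a halfword store (typ = 1) to
// addr = 2 produces mask = 0b00001100, and data replicates dat(15,0) into
// every halfword lane of the 64-bit beat, so whichever lanes the mask selects
// carry the store payload regardless of alignment.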
class LoadGen(typ: UInt, signed: Bool, addr: UInt, dat: UInt, zero: Bool, maxSize: Int) {
private val size = new StoreGen(typ, addr, dat, maxSize).size
private def genData(logMinSize: Int): UInt = {
var res = dat
for (i <- log2Up(maxSize)-1 to logMinSize by -1) {
val pos = 8 << i
val shifted = Mux(addr(i), res(2*pos-1,pos), res(pos-1,0))
val doZero = (i == 0).B && zero
val zeroed = Mux(doZero, 0.U, shifted)
res = Cat(Mux(size === i.U || doZero, Fill(8*maxSize-pos, signed && zeroed(pos-1)), res(8*maxSize-1,pos)), zeroed)
}
res
}
def wordData = genData(2)
def data = genData(0)
}
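// Worked example (informal): with maxSize = 8, a signed byte load (typ = 0)
// from addr = 5 selects dat(47,40) and sign-extends it to 64 bits; with
// signed = false the upper bits are zero-filled instead.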
class AMOALU(operandBits: Int)(implicit p: Parameters) extends Module {
val minXLen = 32
val widths = (0 to log2Ceil(operandBits / minXLen)).map(minXLen << _)
val io = IO(new Bundle {
val mask = Input(UInt((operandBits / 8).W))
val cmd = Input(UInt(M_SZ.W))
val lhs = Input(UInt(operandBits.W))
val rhs = Input(UInt(operandBits.W))
val out = Output(UInt(operandBits.W))
val out_unmasked = Output(UInt(operandBits.W))
})
val max = io.cmd === M_XA_MAX || io.cmd === M_XA_MAXU
val min = io.cmd === M_XA_MIN || io.cmd === M_XA_MINU
val add = io.cmd === M_XA_ADD
val logic_and = io.cmd === M_XA_OR || io.cmd === M_XA_AND
val logic_xor = io.cmd === M_XA_XOR || io.cmd === M_XA_OR
val adder_out = {
// partition the carry chain to support sub-xLen addition
val mask = ~(0.U(operandBits.W) +: widths.init.map(w => !io.mask(w/8-1) << (w-1))).reduce(_|_)
(io.lhs & mask) + (io.rhs & mask)
}
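  // Note (informal): the mask above partitions the carry chain. When the byte
  // mask shows the low word is not part of the operation (e.g. a 32-bit AMO
  // addressed to the upper word, io.mask = 0xf0, so io.mask(3) = 0), bit 31 of
  // both operands is forced to zero, which kills any carry out of the unused
  // lower word; for operations covering the low word io.mask(3) = 1 and the
  // full carry chain is preserved.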
val less = {
// break up the comparator so the lower parts will be CSE'd
def isLessUnsigned(x: UInt, y: UInt, n: Int): Bool = {
if (n == minXLen) x(n-1, 0) < y(n-1, 0)
else x(n-1, n/2) < y(n-1, n/2) || x(n-1, n/2) === y(n-1, n/2) && isLessUnsigned(x, y, n/2)
}
def isLess(x: UInt, y: UInt, n: Int): Bool = {
val signed = {
val mask = M_XA_MIN ^ M_XA_MINU
(io.cmd & mask) === (M_XA_MIN & mask)
}
Mux(x(n-1) === y(n-1), isLessUnsigned(x, y, n), Mux(signed, x(n-1), y(n-1)))
}
PriorityMux(widths.reverse.map(w => (io.mask(w/8/2), isLess(io.lhs, io.rhs, w))))
}
val minmax = Mux(Mux(less, min, max), io.lhs, io.rhs)
val logic =
Mux(logic_and, io.lhs & io.rhs, 0.U) |
Mux(logic_xor, io.lhs ^ io.rhs, 0.U)
val out =
Mux(add, adder_out,
Mux(logic_and || logic_xor, logic,
minmax))
val wmask = FillInterleaved(8, io.mask)
io.out := wmask & out | ~wmask & io.lhs
io.out_unmasked := out
}
module BoomMSHR_9( // @[mshrs.scala:36:7]
input clock, // @[mshrs.scala:36:7]
input reset, // @[mshrs.scala:36:7]
input io_req_pri_val, // @[mshrs.scala:39:14]
output io_req_pri_rdy, // @[mshrs.scala:39:14]
input io_req_sec_val, // @[mshrs.scala:39:14]
output io_req_sec_rdy, // @[mshrs.scala:39:14]
input io_clear_prefetch, // @[mshrs.scala:39:14]
input [4:0] io_rob_pnr_idx, // @[mshrs.scala:39:14]
input [4:0] io_rob_head_idx, // @[mshrs.scala:39:14]
input [31:0] io_req_uop_inst, // @[mshrs.scala:39:14]
input [31:0] io_req_uop_debug_inst, // @[mshrs.scala:39:14]
input io_req_uop_is_rvc, // @[mshrs.scala:39:14]
input [33:0] io_req_uop_debug_pc, // @[mshrs.scala:39:14]
input io_req_uop_iq_type_0, // @[mshrs.scala:39:14]
input io_req_uop_iq_type_1, // @[mshrs.scala:39:14]
input io_req_uop_iq_type_2, // @[mshrs.scala:39:14]
input io_req_uop_iq_type_3, // @[mshrs.scala:39:14]
input io_req_uop_fu_code_0, // @[mshrs.scala:39:14]
input io_req_uop_fu_code_1, // @[mshrs.scala:39:14]
input io_req_uop_fu_code_2, // @[mshrs.scala:39:14]
input io_req_uop_fu_code_3, // @[mshrs.scala:39:14]
input io_req_uop_fu_code_4, // @[mshrs.scala:39:14]
input io_req_uop_fu_code_5, // @[mshrs.scala:39:14]
input io_req_uop_fu_code_6, // @[mshrs.scala:39:14]
input io_req_uop_fu_code_7, // @[mshrs.scala:39:14]
input io_req_uop_fu_code_8, // @[mshrs.scala:39:14]
input io_req_uop_fu_code_9, // @[mshrs.scala:39:14]
input io_req_uop_iw_issued, // @[mshrs.scala:39:14]
input io_req_uop_iw_issued_partial_agen, // @[mshrs.scala:39:14]
input io_req_uop_iw_issued_partial_dgen, // @[mshrs.scala:39:14]
input io_req_uop_iw_p1_speculative_child, // @[mshrs.scala:39:14]
input io_req_uop_iw_p2_speculative_child, // @[mshrs.scala:39:14]
input io_req_uop_iw_p1_bypass_hint, // @[mshrs.scala:39:14]
input io_req_uop_iw_p2_bypass_hint, // @[mshrs.scala:39:14]
input io_req_uop_iw_p3_bypass_hint, // @[mshrs.scala:39:14]
input io_req_uop_dis_col_sel, // @[mshrs.scala:39:14]
input [3:0] io_req_uop_br_mask, // @[mshrs.scala:39:14]
input [1:0] io_req_uop_br_tag, // @[mshrs.scala:39:14]
input [3:0] io_req_uop_br_type, // @[mshrs.scala:39:14]
input io_req_uop_is_sfb, // @[mshrs.scala:39:14]
input io_req_uop_is_fence, // @[mshrs.scala:39:14]
input io_req_uop_is_fencei, // @[mshrs.scala:39:14]
input io_req_uop_is_sfence, // @[mshrs.scala:39:14]
input io_req_uop_is_amo, // @[mshrs.scala:39:14]
input io_req_uop_is_eret, // @[mshrs.scala:39:14]
input io_req_uop_is_sys_pc2epc, // @[mshrs.scala:39:14]
input io_req_uop_is_rocc, // @[mshrs.scala:39:14]
input io_req_uop_is_mov, // @[mshrs.scala:39:14]
input [3:0] io_req_uop_ftq_idx, // @[mshrs.scala:39:14]
input io_req_uop_edge_inst, // @[mshrs.scala:39:14]
input [5:0] io_req_uop_pc_lob, // @[mshrs.scala:39:14]
input io_req_uop_taken, // @[mshrs.scala:39:14]
input io_req_uop_imm_rename, // @[mshrs.scala:39:14]
input [2:0] io_req_uop_imm_sel, // @[mshrs.scala:39:14]
input [4:0] io_req_uop_pimm, // @[mshrs.scala:39:14]
input [19:0] io_req_uop_imm_packed, // @[mshrs.scala:39:14]
input [1:0] io_req_uop_op1_sel, // @[mshrs.scala:39:14]
input [2:0] io_req_uop_op2_sel, // @[mshrs.scala:39:14]
input io_req_uop_fp_ctrl_ldst, // @[mshrs.scala:39:14]
input io_req_uop_fp_ctrl_wen, // @[mshrs.scala:39:14]
input io_req_uop_fp_ctrl_ren1, // @[mshrs.scala:39:14]
input io_req_uop_fp_ctrl_ren2, // @[mshrs.scala:39:14]
input io_req_uop_fp_ctrl_ren3, // @[mshrs.scala:39:14]
input io_req_uop_fp_ctrl_swap12, // @[mshrs.scala:39:14]
input io_req_uop_fp_ctrl_swap23, // @[mshrs.scala:39:14]
input [1:0] io_req_uop_fp_ctrl_typeTagIn, // @[mshrs.scala:39:14]
input [1:0] io_req_uop_fp_ctrl_typeTagOut, // @[mshrs.scala:39:14]
input io_req_uop_fp_ctrl_fromint, // @[mshrs.scala:39:14]
input io_req_uop_fp_ctrl_toint, // @[mshrs.scala:39:14]
input io_req_uop_fp_ctrl_fastpipe, // @[mshrs.scala:39:14]
input io_req_uop_fp_ctrl_fma, // @[mshrs.scala:39:14]
input io_req_uop_fp_ctrl_div, // @[mshrs.scala:39:14]
input io_req_uop_fp_ctrl_sqrt, // @[mshrs.scala:39:14]
input io_req_uop_fp_ctrl_wflags, // @[mshrs.scala:39:14]
input io_req_uop_fp_ctrl_vec, // @[mshrs.scala:39:14]
input [4:0] io_req_uop_rob_idx, // @[mshrs.scala:39:14]
input [3:0] io_req_uop_ldq_idx, // @[mshrs.scala:39:14]
input [3:0] io_req_uop_stq_idx, // @[mshrs.scala:39:14]
input [1:0] io_req_uop_rxq_idx, // @[mshrs.scala:39:14]
input [5:0] io_req_uop_pdst, // @[mshrs.scala:39:14]
input [5:0] io_req_uop_prs1, // @[mshrs.scala:39:14]
input [5:0] io_req_uop_prs2, // @[mshrs.scala:39:14]
input [5:0] io_req_uop_prs3, // @[mshrs.scala:39:14]
input [3:0] io_req_uop_ppred, // @[mshrs.scala:39:14]
input io_req_uop_prs1_busy, // @[mshrs.scala:39:14]
input io_req_uop_prs2_busy, // @[mshrs.scala:39:14]
input io_req_uop_prs3_busy, // @[mshrs.scala:39:14]
input io_req_uop_ppred_busy, // @[mshrs.scala:39:14]
input [5:0] io_req_uop_stale_pdst, // @[mshrs.scala:39:14]
input io_req_uop_exception, // @[mshrs.scala:39:14]
input [63:0] io_req_uop_exc_cause, // @[mshrs.scala:39:14]
input [4:0] io_req_uop_mem_cmd, // @[mshrs.scala:39:14]
input [1:0] io_req_uop_mem_size, // @[mshrs.scala:39:14]
input io_req_uop_mem_signed, // @[mshrs.scala:39:14]
input io_req_uop_uses_ldq, // @[mshrs.scala:39:14]
input io_req_uop_uses_stq, // @[mshrs.scala:39:14]
input io_req_uop_is_unique, // @[mshrs.scala:39:14]
input io_req_uop_flush_on_commit, // @[mshrs.scala:39:14]
input [2:0] io_req_uop_csr_cmd, // @[mshrs.scala:39:14]
input io_req_uop_ldst_is_rs1, // @[mshrs.scala:39:14]
input [5:0] io_req_uop_ldst, // @[mshrs.scala:39:14]
input [5:0] io_req_uop_lrs1, // @[mshrs.scala:39:14]
input [5:0] io_req_uop_lrs2, // @[mshrs.scala:39:14]
input [5:0] io_req_uop_lrs3, // @[mshrs.scala:39:14]
input [1:0] io_req_uop_dst_rtype, // @[mshrs.scala:39:14]
input [1:0] io_req_uop_lrs1_rtype, // @[mshrs.scala:39:14]
input [1:0] io_req_uop_lrs2_rtype, // @[mshrs.scala:39:14]
input io_req_uop_frs3_en, // @[mshrs.scala:39:14]
input io_req_uop_fcn_dw, // @[mshrs.scala:39:14]
input [4:0] io_req_uop_fcn_op, // @[mshrs.scala:39:14]
input io_req_uop_fp_val, // @[mshrs.scala:39:14]
input [2:0] io_req_uop_fp_rm, // @[mshrs.scala:39:14]
input [1:0] io_req_uop_fp_typ, // @[mshrs.scala:39:14]
input io_req_uop_xcpt_pf_if, // @[mshrs.scala:39:14]
input io_req_uop_xcpt_ae_if, // @[mshrs.scala:39:14]
input io_req_uop_xcpt_ma_if, // @[mshrs.scala:39:14]
input io_req_uop_bp_debug_if, // @[mshrs.scala:39:14]
input io_req_uop_bp_xcpt_if, // @[mshrs.scala:39:14]
input [2:0] io_req_uop_debug_fsrc, // @[mshrs.scala:39:14]
input [2:0] io_req_uop_debug_tsrc, // @[mshrs.scala:39:14]
input [33:0] io_req_addr, // @[mshrs.scala:39:14]
input [63:0] io_req_data, // @[mshrs.scala:39:14]
input io_req_is_hella, // @[mshrs.scala:39:14]
input io_req_tag_match, // @[mshrs.scala:39:14]
input [1:0] io_req_old_meta_coh_state, // @[mshrs.scala:39:14]
input [21:0] io_req_old_meta_tag, // @[mshrs.scala:39:14]
input [1:0] io_req_way_en, // @[mshrs.scala:39:14]
input [4:0] io_req_sdq_id, // @[mshrs.scala:39:14]
input io_req_is_probe, // @[mshrs.scala:39:14]
output io_idx_valid, // @[mshrs.scala:39:14]
output [3:0] io_idx_bits, // @[mshrs.scala:39:14]
output io_way_valid, // @[mshrs.scala:39:14]
output [1:0] io_way_bits, // @[mshrs.scala:39:14]
output io_tag_valid, // @[mshrs.scala:39:14]
output [23:0] io_tag_bits, // @[mshrs.scala:39:14]
input io_mem_acquire_ready, // @[mshrs.scala:39:14]
output io_mem_acquire_valid, // @[mshrs.scala:39:14]
output [2:0] io_mem_acquire_bits_param, // @[mshrs.scala:39:14]
output [31:0] io_mem_acquire_bits_address, // @[mshrs.scala:39:14]
output io_mem_grant_ready, // @[mshrs.scala:39:14]
input io_mem_grant_valid, // @[mshrs.scala:39:14]
input [2:0] io_mem_grant_bits_opcode, // @[mshrs.scala:39:14]
input [1:0] io_mem_grant_bits_param, // @[mshrs.scala:39:14]
input [3:0] io_mem_grant_bits_size, // @[mshrs.scala:39:14]
input [3:0] io_mem_grant_bits_source, // @[mshrs.scala:39:14]
input [2:0] io_mem_grant_bits_sink, // @[mshrs.scala:39:14]
input io_mem_grant_bits_denied, // @[mshrs.scala:39:14]
input [63:0] io_mem_grant_bits_data, // @[mshrs.scala:39:14]
input io_mem_grant_bits_corrupt, // @[mshrs.scala:39:14]
input io_mem_finish_ready, // @[mshrs.scala:39:14]
output io_mem_finish_valid, // @[mshrs.scala:39:14]
output [2:0] io_mem_finish_bits_sink, // @[mshrs.scala:39:14]
input io_prober_state_valid, // @[mshrs.scala:39:14]
input [33:0] io_prober_state_bits, // @[mshrs.scala:39:14]
input io_refill_ready, // @[mshrs.scala:39:14]
output io_refill_valid, // @[mshrs.scala:39:14]
output [1:0] io_refill_bits_way_en, // @[mshrs.scala:39:14]
output [9:0] io_refill_bits_addr, // @[mshrs.scala:39:14]
output [63:0] io_refill_bits_data, // @[mshrs.scala:39:14]
input io_meta_write_ready, // @[mshrs.scala:39:14]
output io_meta_write_valid, // @[mshrs.scala:39:14]
output [3:0] io_meta_write_bits_idx, // @[mshrs.scala:39:14]
output [1:0] io_meta_write_bits_way_en, // @[mshrs.scala:39:14]
output [21:0] io_meta_write_bits_tag, // @[mshrs.scala:39:14]
output [1:0] io_meta_write_bits_data_coh_state, // @[mshrs.scala:39:14]
output [21:0] io_meta_write_bits_data_tag, // @[mshrs.scala:39:14]
input io_meta_read_ready, // @[mshrs.scala:39:14]
output io_meta_read_valid, // @[mshrs.scala:39:14]
output [3:0] io_meta_read_bits_idx, // @[mshrs.scala:39:14]
output [1:0] io_meta_read_bits_way_en, // @[mshrs.scala:39:14]
output [21:0] io_meta_read_bits_tag, // @[mshrs.scala:39:14]
input io_meta_resp_valid, // @[mshrs.scala:39:14]
input [1:0] io_meta_resp_bits_coh_state, // @[mshrs.scala:39:14]
input [21:0] io_meta_resp_bits_tag, // @[mshrs.scala:39:14]
input io_wb_req_ready, // @[mshrs.scala:39:14]
output io_wb_req_valid, // @[mshrs.scala:39:14]
output [21:0] io_wb_req_bits_tag, // @[mshrs.scala:39:14]
output [3:0] io_wb_req_bits_idx, // @[mshrs.scala:39:14]
output [2:0] io_wb_req_bits_param, // @[mshrs.scala:39:14]
output [1:0] io_wb_req_bits_way_en, // @[mshrs.scala:39:14]
output io_commit_val, // @[mshrs.scala:39:14]
output [33:0] io_commit_addr, // @[mshrs.scala:39:14]
output [1:0] io_commit_coh_state, // @[mshrs.scala:39:14]
output [2:0] io_lb_read_offset, // @[mshrs.scala:39:14]
input [63:0] io_lb_resp, // @[mshrs.scala:39:14]
output io_lb_write_valid, // @[mshrs.scala:39:14]
output [2:0] io_lb_write_bits_offset, // @[mshrs.scala:39:14]
output [63:0] io_lb_write_bits_data, // @[mshrs.scala:39:14]
input io_replay_ready, // @[mshrs.scala:39:14]
output io_replay_valid, // @[mshrs.scala:39:14]
output [31:0] io_replay_bits_uop_inst, // @[mshrs.scala:39:14]
output [31:0] io_replay_bits_uop_debug_inst, // @[mshrs.scala:39:14]
output io_replay_bits_uop_is_rvc, // @[mshrs.scala:39:14]
output [33:0] io_replay_bits_uop_debug_pc, // @[mshrs.scala:39:14]
output io_replay_bits_uop_iq_type_0, // @[mshrs.scala:39:14]
output io_replay_bits_uop_iq_type_1, // @[mshrs.scala:39:14]
output io_replay_bits_uop_iq_type_2, // @[mshrs.scala:39:14]
output io_replay_bits_uop_iq_type_3, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fu_code_0, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fu_code_1, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fu_code_2, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fu_code_3, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fu_code_4, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fu_code_5, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fu_code_6, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fu_code_7, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fu_code_8, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fu_code_9, // @[mshrs.scala:39:14]
output io_replay_bits_uop_iw_issued, // @[mshrs.scala:39:14]
output io_replay_bits_uop_iw_issued_partial_agen, // @[mshrs.scala:39:14]
output io_replay_bits_uop_iw_issued_partial_dgen, // @[mshrs.scala:39:14]
output io_replay_bits_uop_iw_p1_speculative_child, // @[mshrs.scala:39:14]
output io_replay_bits_uop_iw_p2_speculative_child, // @[mshrs.scala:39:14]
output io_replay_bits_uop_iw_p1_bypass_hint, // @[mshrs.scala:39:14]
output io_replay_bits_uop_iw_p2_bypass_hint, // @[mshrs.scala:39:14]
output io_replay_bits_uop_iw_p3_bypass_hint, // @[mshrs.scala:39:14]
output io_replay_bits_uop_dis_col_sel, // @[mshrs.scala:39:14]
output [3:0] io_replay_bits_uop_br_mask, // @[mshrs.scala:39:14]
output [1:0] io_replay_bits_uop_br_tag, // @[mshrs.scala:39:14]
output [3:0] io_replay_bits_uop_br_type, // @[mshrs.scala:39:14]
output io_replay_bits_uop_is_sfb, // @[mshrs.scala:39:14]
output io_replay_bits_uop_is_fence, // @[mshrs.scala:39:14]
output io_replay_bits_uop_is_fencei, // @[mshrs.scala:39:14]
output io_replay_bits_uop_is_sfence, // @[mshrs.scala:39:14]
output io_replay_bits_uop_is_amo, // @[mshrs.scala:39:14]
output io_replay_bits_uop_is_eret, // @[mshrs.scala:39:14]
output io_replay_bits_uop_is_sys_pc2epc, // @[mshrs.scala:39:14]
output io_replay_bits_uop_is_rocc, // @[mshrs.scala:39:14]
output io_replay_bits_uop_is_mov, // @[mshrs.scala:39:14]
output [3:0] io_replay_bits_uop_ftq_idx, // @[mshrs.scala:39:14]
output io_replay_bits_uop_edge_inst, // @[mshrs.scala:39:14]
output [5:0] io_replay_bits_uop_pc_lob, // @[mshrs.scala:39:14]
output io_replay_bits_uop_taken, // @[mshrs.scala:39:14]
output io_replay_bits_uop_imm_rename, // @[mshrs.scala:39:14]
output [2:0] io_replay_bits_uop_imm_sel, // @[mshrs.scala:39:14]
output [4:0] io_replay_bits_uop_pimm, // @[mshrs.scala:39:14]
output [19:0] io_replay_bits_uop_imm_packed, // @[mshrs.scala:39:14]
output [1:0] io_replay_bits_uop_op1_sel, // @[mshrs.scala:39:14]
output [2:0] io_replay_bits_uop_op2_sel, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fp_ctrl_ldst, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fp_ctrl_wen, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fp_ctrl_ren1, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fp_ctrl_ren2, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fp_ctrl_ren3, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fp_ctrl_swap12, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fp_ctrl_swap23, // @[mshrs.scala:39:14]
output [1:0] io_replay_bits_uop_fp_ctrl_typeTagIn, // @[mshrs.scala:39:14]
output [1:0] io_replay_bits_uop_fp_ctrl_typeTagOut, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fp_ctrl_fromint, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fp_ctrl_toint, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fp_ctrl_fastpipe, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fp_ctrl_fma, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fp_ctrl_div, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fp_ctrl_sqrt, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fp_ctrl_wflags, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fp_ctrl_vec, // @[mshrs.scala:39:14]
output [4:0] io_replay_bits_uop_rob_idx, // @[mshrs.scala:39:14]
output [3:0] io_replay_bits_uop_ldq_idx, // @[mshrs.scala:39:14]
output [3:0] io_replay_bits_uop_stq_idx, // @[mshrs.scala:39:14]
output [1:0] io_replay_bits_uop_rxq_idx, // @[mshrs.scala:39:14]
output [5:0] io_replay_bits_uop_pdst, // @[mshrs.scala:39:14]
output [5:0] io_replay_bits_uop_prs1, // @[mshrs.scala:39:14]
output [5:0] io_replay_bits_uop_prs2, // @[mshrs.scala:39:14]
output [5:0] io_replay_bits_uop_prs3, // @[mshrs.scala:39:14]
output [3:0] io_replay_bits_uop_ppred, // @[mshrs.scala:39:14]
output io_replay_bits_uop_prs1_busy, // @[mshrs.scala:39:14]
output io_replay_bits_uop_prs2_busy, // @[mshrs.scala:39:14]
output io_replay_bits_uop_prs3_busy, // @[mshrs.scala:39:14]
output io_replay_bits_uop_ppred_busy, // @[mshrs.scala:39:14]
output [5:0] io_replay_bits_uop_stale_pdst, // @[mshrs.scala:39:14]
output io_replay_bits_uop_exception, // @[mshrs.scala:39:14]
output [63:0] io_replay_bits_uop_exc_cause, // @[mshrs.scala:39:14]
output [4:0] io_replay_bits_uop_mem_cmd, // @[mshrs.scala:39:14]
output [1:0] io_replay_bits_uop_mem_size, // @[mshrs.scala:39:14]
output io_replay_bits_uop_mem_signed, // @[mshrs.scala:39:14]
output io_replay_bits_uop_uses_ldq, // @[mshrs.scala:39:14]
output io_replay_bits_uop_uses_stq, // @[mshrs.scala:39:14]
output io_replay_bits_uop_is_unique, // @[mshrs.scala:39:14]
output io_replay_bits_uop_flush_on_commit, // @[mshrs.scala:39:14]
output [2:0] io_replay_bits_uop_csr_cmd, // @[mshrs.scala:39:14]
output io_replay_bits_uop_ldst_is_rs1, // @[mshrs.scala:39:14]
output [5:0] io_replay_bits_uop_ldst, // @[mshrs.scala:39:14]
output [5:0] io_replay_bits_uop_lrs1, // @[mshrs.scala:39:14]
output [5:0] io_replay_bits_uop_lrs2, // @[mshrs.scala:39:14]
output [5:0] io_replay_bits_uop_lrs3, // @[mshrs.scala:39:14]
output [1:0] io_replay_bits_uop_dst_rtype, // @[mshrs.scala:39:14]
output [1:0] io_replay_bits_uop_lrs1_rtype, // @[mshrs.scala:39:14]
output [1:0] io_replay_bits_uop_lrs2_rtype, // @[mshrs.scala:39:14]
output io_replay_bits_uop_frs3_en, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fcn_dw, // @[mshrs.scala:39:14]
output [4:0] io_replay_bits_uop_fcn_op, // @[mshrs.scala:39:14]
output io_replay_bits_uop_fp_val, // @[mshrs.scala:39:14]
output [2:0] io_replay_bits_uop_fp_rm, // @[mshrs.scala:39:14]
output [1:0] io_replay_bits_uop_fp_typ, // @[mshrs.scala:39:14]
output io_replay_bits_uop_xcpt_pf_if, // @[mshrs.scala:39:14]
output io_replay_bits_uop_xcpt_ae_if, // @[mshrs.scala:39:14]
output io_replay_bits_uop_xcpt_ma_if, // @[mshrs.scala:39:14]
output io_replay_bits_uop_bp_debug_if, // @[mshrs.scala:39:14]
output io_replay_bits_uop_bp_xcpt_if, // @[mshrs.scala:39:14]
output [2:0] io_replay_bits_uop_debug_fsrc, // @[mshrs.scala:39:14]
output [2:0] io_replay_bits_uop_debug_tsrc, // @[mshrs.scala:39:14]
output [33:0] io_replay_bits_addr, // @[mshrs.scala:39:14]
output [63:0] io_replay_bits_data, // @[mshrs.scala:39:14]
output io_replay_bits_is_hella, // @[mshrs.scala:39:14]
output io_replay_bits_tag_match, // @[mshrs.scala:39:14]
output [1:0] io_replay_bits_old_meta_coh_state, // @[mshrs.scala:39:14]
output [21:0] io_replay_bits_old_meta_tag, // @[mshrs.scala:39:14]
output [1:0] io_replay_bits_way_en, // @[mshrs.scala:39:14]
output [4:0] io_replay_bits_sdq_id, // @[mshrs.scala:39:14]
input io_resp_ready, // @[mshrs.scala:39:14]
output io_resp_valid, // @[mshrs.scala:39:14]
output [31:0] io_resp_bits_uop_inst, // @[mshrs.scala:39:14]
output [31:0] io_resp_bits_uop_debug_inst, // @[mshrs.scala:39:14]
output io_resp_bits_uop_is_rvc, // @[mshrs.scala:39:14]
output [33:0] io_resp_bits_uop_debug_pc, // @[mshrs.scala:39:14]
output io_resp_bits_uop_iq_type_0, // @[mshrs.scala:39:14]
output io_resp_bits_uop_iq_type_1, // @[mshrs.scala:39:14]
output io_resp_bits_uop_iq_type_2, // @[mshrs.scala:39:14]
output io_resp_bits_uop_iq_type_3, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fu_code_0, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fu_code_1, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fu_code_2, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fu_code_3, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fu_code_4, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fu_code_5, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fu_code_6, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fu_code_7, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fu_code_8, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fu_code_9, // @[mshrs.scala:39:14]
output io_resp_bits_uop_iw_issued, // @[mshrs.scala:39:14]
output io_resp_bits_uop_iw_issued_partial_agen, // @[mshrs.scala:39:14]
output io_resp_bits_uop_iw_issued_partial_dgen, // @[mshrs.scala:39:14]
output io_resp_bits_uop_iw_p1_speculative_child, // @[mshrs.scala:39:14]
output io_resp_bits_uop_iw_p2_speculative_child, // @[mshrs.scala:39:14]
output io_resp_bits_uop_iw_p1_bypass_hint, // @[mshrs.scala:39:14]
output io_resp_bits_uop_iw_p2_bypass_hint, // @[mshrs.scala:39:14]
output io_resp_bits_uop_iw_p3_bypass_hint, // @[mshrs.scala:39:14]
output io_resp_bits_uop_dis_col_sel, // @[mshrs.scala:39:14]
output [3:0] io_resp_bits_uop_br_mask, // @[mshrs.scala:39:14]
output [1:0] io_resp_bits_uop_br_tag, // @[mshrs.scala:39:14]
output [3:0] io_resp_bits_uop_br_type, // @[mshrs.scala:39:14]
output io_resp_bits_uop_is_sfb, // @[mshrs.scala:39:14]
output io_resp_bits_uop_is_fence, // @[mshrs.scala:39:14]
output io_resp_bits_uop_is_fencei, // @[mshrs.scala:39:14]
output io_resp_bits_uop_is_sfence, // @[mshrs.scala:39:14]
output io_resp_bits_uop_is_amo, // @[mshrs.scala:39:14]
output io_resp_bits_uop_is_eret, // @[mshrs.scala:39:14]
output io_resp_bits_uop_is_sys_pc2epc, // @[mshrs.scala:39:14]
output io_resp_bits_uop_is_rocc, // @[mshrs.scala:39:14]
output io_resp_bits_uop_is_mov, // @[mshrs.scala:39:14]
output [3:0] io_resp_bits_uop_ftq_idx, // @[mshrs.scala:39:14]
output io_resp_bits_uop_edge_inst, // @[mshrs.scala:39:14]
output [5:0] io_resp_bits_uop_pc_lob, // @[mshrs.scala:39:14]
output io_resp_bits_uop_taken, // @[mshrs.scala:39:14]
output io_resp_bits_uop_imm_rename, // @[mshrs.scala:39:14]
output [2:0] io_resp_bits_uop_imm_sel, // @[mshrs.scala:39:14]
output [4:0] io_resp_bits_uop_pimm, // @[mshrs.scala:39:14]
output [19:0] io_resp_bits_uop_imm_packed, // @[mshrs.scala:39:14]
output [1:0] io_resp_bits_uop_op1_sel, // @[mshrs.scala:39:14]
output [2:0] io_resp_bits_uop_op2_sel, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fp_ctrl_ldst, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fp_ctrl_wen, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fp_ctrl_ren1, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fp_ctrl_ren2, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fp_ctrl_ren3, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fp_ctrl_swap12, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fp_ctrl_swap23, // @[mshrs.scala:39:14]
output [1:0] io_resp_bits_uop_fp_ctrl_typeTagIn, // @[mshrs.scala:39:14]
output [1:0] io_resp_bits_uop_fp_ctrl_typeTagOut, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fp_ctrl_fromint, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fp_ctrl_toint, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fp_ctrl_fastpipe, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fp_ctrl_fma, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fp_ctrl_div, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fp_ctrl_sqrt, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fp_ctrl_wflags, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fp_ctrl_vec, // @[mshrs.scala:39:14]
output [4:0] io_resp_bits_uop_rob_idx, // @[mshrs.scala:39:14]
output [3:0] io_resp_bits_uop_ldq_idx, // @[mshrs.scala:39:14]
output [3:0] io_resp_bits_uop_stq_idx, // @[mshrs.scala:39:14]
output [1:0] io_resp_bits_uop_rxq_idx, // @[mshrs.scala:39:14]
output [5:0] io_resp_bits_uop_pdst, // @[mshrs.scala:39:14]
output [5:0] io_resp_bits_uop_prs1, // @[mshrs.scala:39:14]
output [5:0] io_resp_bits_uop_prs2, // @[mshrs.scala:39:14]
output [5:0] io_resp_bits_uop_prs3, // @[mshrs.scala:39:14]
output [3:0] io_resp_bits_uop_ppred, // @[mshrs.scala:39:14]
output io_resp_bits_uop_prs1_busy, // @[mshrs.scala:39:14]
output io_resp_bits_uop_prs2_busy, // @[mshrs.scala:39:14]
output io_resp_bits_uop_prs3_busy, // @[mshrs.scala:39:14]
output io_resp_bits_uop_ppred_busy, // @[mshrs.scala:39:14]
output [5:0] io_resp_bits_uop_stale_pdst, // @[mshrs.scala:39:14]
output io_resp_bits_uop_exception, // @[mshrs.scala:39:14]
output [63:0] io_resp_bits_uop_exc_cause, // @[mshrs.scala:39:14]
output [4:0] io_resp_bits_uop_mem_cmd, // @[mshrs.scala:39:14]
output [1:0] io_resp_bits_uop_mem_size, // @[mshrs.scala:39:14]
output io_resp_bits_uop_mem_signed, // @[mshrs.scala:39:14]
output io_resp_bits_uop_uses_ldq, // @[mshrs.scala:39:14]
output io_resp_bits_uop_uses_stq, // @[mshrs.scala:39:14]
output io_resp_bits_uop_is_unique, // @[mshrs.scala:39:14]
output io_resp_bits_uop_flush_on_commit, // @[mshrs.scala:39:14]
output [2:0] io_resp_bits_uop_csr_cmd, // @[mshrs.scala:39:14]
output io_resp_bits_uop_ldst_is_rs1, // @[mshrs.scala:39:14]
output [5:0] io_resp_bits_uop_ldst, // @[mshrs.scala:39:14]
output [5:0] io_resp_bits_uop_lrs1, // @[mshrs.scala:39:14]
output [5:0] io_resp_bits_uop_lrs2, // @[mshrs.scala:39:14]
output [5:0] io_resp_bits_uop_lrs3, // @[mshrs.scala:39:14]
output [1:0] io_resp_bits_uop_dst_rtype, // @[mshrs.scala:39:14]
output [1:0] io_resp_bits_uop_lrs1_rtype, // @[mshrs.scala:39:14]
output [1:0] io_resp_bits_uop_lrs2_rtype, // @[mshrs.scala:39:14]
output io_resp_bits_uop_frs3_en, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fcn_dw, // @[mshrs.scala:39:14]
output [4:0] io_resp_bits_uop_fcn_op, // @[mshrs.scala:39:14]
output io_resp_bits_uop_fp_val, // @[mshrs.scala:39:14]
output [2:0] io_resp_bits_uop_fp_rm, // @[mshrs.scala:39:14]
output [1:0] io_resp_bits_uop_fp_typ, // @[mshrs.scala:39:14]
output io_resp_bits_uop_xcpt_pf_if, // @[mshrs.scala:39:14]
output io_resp_bits_uop_xcpt_ae_if, // @[mshrs.scala:39:14]
output io_resp_bits_uop_xcpt_ma_if, // @[mshrs.scala:39:14]
output io_resp_bits_uop_bp_debug_if, // @[mshrs.scala:39:14]
output io_resp_bits_uop_bp_xcpt_if, // @[mshrs.scala:39:14]
output [2:0] io_resp_bits_uop_debug_fsrc, // @[mshrs.scala:39:14]
output [2:0] io_resp_bits_uop_debug_tsrc, // @[mshrs.scala:39:14]
output [63:0] io_resp_bits_data, // @[mshrs.scala:39:14]
output io_resp_bits_is_hella, // @[mshrs.scala:39:14]
input io_wb_resp, // @[mshrs.scala:39:14]
output io_probe_rdy // @[mshrs.scala:39:14]
);
wire rpq_io_deq_ready; // @[mshrs.scala:135:20, :234:30, :241:40, :246:41, :266:45]
wire _rpq_io_enq_ready; // @[mshrs.scala:128:19]
wire _rpq_io_deq_valid; // @[mshrs.scala:128:19]
wire [31:0] _rpq_io_deq_bits_uop_inst; // @[mshrs.scala:128:19]
wire [31:0] _rpq_io_deq_bits_uop_debug_inst; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_is_rvc; // @[mshrs.scala:128:19]
wire [33:0] _rpq_io_deq_bits_uop_debug_pc; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_iq_type_0; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_iq_type_1; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_iq_type_2; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_iq_type_3; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fu_code_0; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fu_code_1; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fu_code_2; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fu_code_3; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fu_code_4; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fu_code_5; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fu_code_6; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fu_code_7; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fu_code_8; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fu_code_9; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_iw_issued; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_iw_issued_partial_agen; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_iw_issued_partial_dgen; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_iw_p1_speculative_child; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_iw_p2_speculative_child; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_iw_p1_bypass_hint; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_iw_p2_bypass_hint; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_iw_p3_bypass_hint; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_dis_col_sel; // @[mshrs.scala:128:19]
wire [3:0] _rpq_io_deq_bits_uop_br_mask; // @[mshrs.scala:128:19]
wire [1:0] _rpq_io_deq_bits_uop_br_tag; // @[mshrs.scala:128:19]
wire [3:0] _rpq_io_deq_bits_uop_br_type; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_is_sfb; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_is_fence; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_is_fencei; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_is_sfence; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_is_amo; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_is_eret; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_is_sys_pc2epc; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_is_rocc; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_is_mov; // @[mshrs.scala:128:19]
wire [3:0] _rpq_io_deq_bits_uop_ftq_idx; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_edge_inst; // @[mshrs.scala:128:19]
wire [5:0] _rpq_io_deq_bits_uop_pc_lob; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_taken; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_imm_rename; // @[mshrs.scala:128:19]
wire [2:0] _rpq_io_deq_bits_uop_imm_sel; // @[mshrs.scala:128:19]
wire [4:0] _rpq_io_deq_bits_uop_pimm; // @[mshrs.scala:128:19]
wire [19:0] _rpq_io_deq_bits_uop_imm_packed; // @[mshrs.scala:128:19]
wire [1:0] _rpq_io_deq_bits_uop_op1_sel; // @[mshrs.scala:128:19]
wire [2:0] _rpq_io_deq_bits_uop_op2_sel; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fp_ctrl_ldst; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fp_ctrl_wen; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fp_ctrl_ren1; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fp_ctrl_ren2; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fp_ctrl_ren3; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fp_ctrl_swap12; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fp_ctrl_swap23; // @[mshrs.scala:128:19]
wire [1:0] _rpq_io_deq_bits_uop_fp_ctrl_typeTagIn; // @[mshrs.scala:128:19]
wire [1:0] _rpq_io_deq_bits_uop_fp_ctrl_typeTagOut; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fp_ctrl_fromint; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fp_ctrl_toint; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fp_ctrl_fastpipe; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fp_ctrl_fma; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fp_ctrl_div; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fp_ctrl_sqrt; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fp_ctrl_wflags; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fp_ctrl_vec; // @[mshrs.scala:128:19]
wire [4:0] _rpq_io_deq_bits_uop_rob_idx; // @[mshrs.scala:128:19]
wire [3:0] _rpq_io_deq_bits_uop_ldq_idx; // @[mshrs.scala:128:19]
wire [3:0] _rpq_io_deq_bits_uop_stq_idx; // @[mshrs.scala:128:19]
wire [1:0] _rpq_io_deq_bits_uop_rxq_idx; // @[mshrs.scala:128:19]
wire [5:0] _rpq_io_deq_bits_uop_pdst; // @[mshrs.scala:128:19]
wire [5:0] _rpq_io_deq_bits_uop_prs1; // @[mshrs.scala:128:19]
wire [5:0] _rpq_io_deq_bits_uop_prs2; // @[mshrs.scala:128:19]
wire [5:0] _rpq_io_deq_bits_uop_prs3; // @[mshrs.scala:128:19]
wire [3:0] _rpq_io_deq_bits_uop_ppred; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_prs1_busy; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_prs2_busy; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_prs3_busy; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_ppred_busy; // @[mshrs.scala:128:19]
wire [5:0] _rpq_io_deq_bits_uop_stale_pdst; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_exception; // @[mshrs.scala:128:19]
wire [63:0] _rpq_io_deq_bits_uop_exc_cause; // @[mshrs.scala:128:19]
wire [4:0] _rpq_io_deq_bits_uop_mem_cmd; // @[mshrs.scala:128:19]
wire [1:0] _rpq_io_deq_bits_uop_mem_size; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_mem_signed; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_uses_ldq; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_uses_stq; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_is_unique; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_flush_on_commit; // @[mshrs.scala:128:19]
wire [2:0] _rpq_io_deq_bits_uop_csr_cmd; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_ldst_is_rs1; // @[mshrs.scala:128:19]
wire [5:0] _rpq_io_deq_bits_uop_ldst; // @[mshrs.scala:128:19]
wire [5:0] _rpq_io_deq_bits_uop_lrs1; // @[mshrs.scala:128:19]
wire [5:0] _rpq_io_deq_bits_uop_lrs2; // @[mshrs.scala:128:19]
wire [5:0] _rpq_io_deq_bits_uop_lrs3; // @[mshrs.scala:128:19]
wire [1:0] _rpq_io_deq_bits_uop_dst_rtype; // @[mshrs.scala:128:19]
wire [1:0] _rpq_io_deq_bits_uop_lrs1_rtype; // @[mshrs.scala:128:19]
wire [1:0] _rpq_io_deq_bits_uop_lrs2_rtype; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_frs3_en; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fcn_dw; // @[mshrs.scala:128:19]
wire [4:0] _rpq_io_deq_bits_uop_fcn_op; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_fp_val; // @[mshrs.scala:128:19]
wire [2:0] _rpq_io_deq_bits_uop_fp_rm; // @[mshrs.scala:128:19]
wire [1:0] _rpq_io_deq_bits_uop_fp_typ; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_xcpt_pf_if; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_xcpt_ae_if; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_xcpt_ma_if; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_bp_debug_if; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_uop_bp_xcpt_if; // @[mshrs.scala:128:19]
wire [2:0] _rpq_io_deq_bits_uop_debug_fsrc; // @[mshrs.scala:128:19]
wire [2:0] _rpq_io_deq_bits_uop_debug_tsrc; // @[mshrs.scala:128:19]
wire [33:0] _rpq_io_deq_bits_addr; // @[mshrs.scala:128:19]
wire [63:0] _rpq_io_deq_bits_data; // @[mshrs.scala:128:19]
wire _rpq_io_deq_bits_is_hella; // @[mshrs.scala:128:19]
wire [1:0] _rpq_io_deq_bits_way_en; // @[mshrs.scala:128:19]
wire _rpq_io_empty; // @[mshrs.scala:128:19]
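// Note (added for readability): the declarations above expose the deq-side fields of the
// replay queue (`rpq`) instance; the wires that follow shadow this module's input ports as
// local `*_0` copies, an aliasing pattern the Verilog emitter uses before the ports are
// consumed by the MSHR datapath.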
wire io_req_pri_val_0 = io_req_pri_val; // @[mshrs.scala:36:7]
wire io_req_sec_val_0 = io_req_sec_val; // @[mshrs.scala:36:7]
wire io_clear_prefetch_0 = io_clear_prefetch; // @[mshrs.scala:36:7]
wire [4:0] io_rob_pnr_idx_0 = io_rob_pnr_idx; // @[mshrs.scala:36:7]
wire [4:0] io_rob_head_idx_0 = io_rob_head_idx; // @[mshrs.scala:36:7]
wire [31:0] io_req_uop_inst_0 = io_req_uop_inst; // @[mshrs.scala:36:7]
wire [31:0] io_req_uop_debug_inst_0 = io_req_uop_debug_inst; // @[mshrs.scala:36:7]
wire io_req_uop_is_rvc_0 = io_req_uop_is_rvc; // @[mshrs.scala:36:7]
wire [33:0] io_req_uop_debug_pc_0 = io_req_uop_debug_pc; // @[mshrs.scala:36:7]
wire io_req_uop_iq_type_0_0 = io_req_uop_iq_type_0; // @[mshrs.scala:36:7]
wire io_req_uop_iq_type_1_0 = io_req_uop_iq_type_1; // @[mshrs.scala:36:7]
wire io_req_uop_iq_type_2_0 = io_req_uop_iq_type_2; // @[mshrs.scala:36:7]
wire io_req_uop_iq_type_3_0 = io_req_uop_iq_type_3; // @[mshrs.scala:36:7]
wire io_req_uop_fu_code_0_0 = io_req_uop_fu_code_0; // @[mshrs.scala:36:7]
wire io_req_uop_fu_code_1_0 = io_req_uop_fu_code_1; // @[mshrs.scala:36:7]
wire io_req_uop_fu_code_2_0 = io_req_uop_fu_code_2; // @[mshrs.scala:36:7]
wire io_req_uop_fu_code_3_0 = io_req_uop_fu_code_3; // @[mshrs.scala:36:7]
wire io_req_uop_fu_code_4_0 = io_req_uop_fu_code_4; // @[mshrs.scala:36:7]
wire io_req_uop_fu_code_5_0 = io_req_uop_fu_code_5; // @[mshrs.scala:36:7]
wire io_req_uop_fu_code_6_0 = io_req_uop_fu_code_6; // @[mshrs.scala:36:7]
wire io_req_uop_fu_code_7_0 = io_req_uop_fu_code_7; // @[mshrs.scala:36:7]
wire io_req_uop_fu_code_8_0 = io_req_uop_fu_code_8; // @[mshrs.scala:36:7]
wire io_req_uop_fu_code_9_0 = io_req_uop_fu_code_9; // @[mshrs.scala:36:7]
wire io_req_uop_iw_issued_0 = io_req_uop_iw_issued; // @[mshrs.scala:36:7]
wire io_req_uop_iw_issued_partial_agen_0 = io_req_uop_iw_issued_partial_agen; // @[mshrs.scala:36:7]
wire io_req_uop_iw_issued_partial_dgen_0 = io_req_uop_iw_issued_partial_dgen; // @[mshrs.scala:36:7]
wire io_req_uop_iw_p1_speculative_child_0 = io_req_uop_iw_p1_speculative_child; // @[mshrs.scala:36:7]
wire io_req_uop_iw_p2_speculative_child_0 = io_req_uop_iw_p2_speculative_child; // @[mshrs.scala:36:7]
wire io_req_uop_iw_p1_bypass_hint_0 = io_req_uop_iw_p1_bypass_hint; // @[mshrs.scala:36:7]
wire io_req_uop_iw_p2_bypass_hint_0 = io_req_uop_iw_p2_bypass_hint; // @[mshrs.scala:36:7]
wire io_req_uop_iw_p3_bypass_hint_0 = io_req_uop_iw_p3_bypass_hint; // @[mshrs.scala:36:7]
wire io_req_uop_dis_col_sel_0 = io_req_uop_dis_col_sel; // @[mshrs.scala:36:7]
wire [3:0] io_req_uop_br_mask_0 = io_req_uop_br_mask; // @[mshrs.scala:36:7]
wire [1:0] io_req_uop_br_tag_0 = io_req_uop_br_tag; // @[mshrs.scala:36:7]
wire [3:0] io_req_uop_br_type_0 = io_req_uop_br_type; // @[mshrs.scala:36:7]
wire io_req_uop_is_sfb_0 = io_req_uop_is_sfb; // @[mshrs.scala:36:7]
wire io_req_uop_is_fence_0 = io_req_uop_is_fence; // @[mshrs.scala:36:7]
wire io_req_uop_is_fencei_0 = io_req_uop_is_fencei; // @[mshrs.scala:36:7]
wire io_req_uop_is_sfence_0 = io_req_uop_is_sfence; // @[mshrs.scala:36:7]
wire io_req_uop_is_amo_0 = io_req_uop_is_amo; // @[mshrs.scala:36:7]
wire io_req_uop_is_eret_0 = io_req_uop_is_eret; // @[mshrs.scala:36:7]
wire io_req_uop_is_sys_pc2epc_0 = io_req_uop_is_sys_pc2epc; // @[mshrs.scala:36:7]
wire io_req_uop_is_rocc_0 = io_req_uop_is_rocc; // @[mshrs.scala:36:7]
wire io_req_uop_is_mov_0 = io_req_uop_is_mov; // @[mshrs.scala:36:7]
wire [3:0] io_req_uop_ftq_idx_0 = io_req_uop_ftq_idx; // @[mshrs.scala:36:7]
wire io_req_uop_edge_inst_0 = io_req_uop_edge_inst; // @[mshrs.scala:36:7]
wire [5:0] io_req_uop_pc_lob_0 = io_req_uop_pc_lob; // @[mshrs.scala:36:7]
wire io_req_uop_taken_0 = io_req_uop_taken; // @[mshrs.scala:36:7]
wire io_req_uop_imm_rename_0 = io_req_uop_imm_rename; // @[mshrs.scala:36:7]
wire [2:0] io_req_uop_imm_sel_0 = io_req_uop_imm_sel; // @[mshrs.scala:36:7]
wire [4:0] io_req_uop_pimm_0 = io_req_uop_pimm; // @[mshrs.scala:36:7]
wire [19:0] io_req_uop_imm_packed_0 = io_req_uop_imm_packed; // @[mshrs.scala:36:7]
wire [1:0] io_req_uop_op1_sel_0 = io_req_uop_op1_sel; // @[mshrs.scala:36:7]
wire [2:0] io_req_uop_op2_sel_0 = io_req_uop_op2_sel; // @[mshrs.scala:36:7]
wire io_req_uop_fp_ctrl_ldst_0 = io_req_uop_fp_ctrl_ldst; // @[mshrs.scala:36:7]
wire io_req_uop_fp_ctrl_wen_0 = io_req_uop_fp_ctrl_wen; // @[mshrs.scala:36:7]
wire io_req_uop_fp_ctrl_ren1_0 = io_req_uop_fp_ctrl_ren1; // @[mshrs.scala:36:7]
wire io_req_uop_fp_ctrl_ren2_0 = io_req_uop_fp_ctrl_ren2; // @[mshrs.scala:36:7]
wire io_req_uop_fp_ctrl_ren3_0 = io_req_uop_fp_ctrl_ren3; // @[mshrs.scala:36:7]
wire io_req_uop_fp_ctrl_swap12_0 = io_req_uop_fp_ctrl_swap12; // @[mshrs.scala:36:7]
wire io_req_uop_fp_ctrl_swap23_0 = io_req_uop_fp_ctrl_swap23; // @[mshrs.scala:36:7]
wire [1:0] io_req_uop_fp_ctrl_typeTagIn_0 = io_req_uop_fp_ctrl_typeTagIn; // @[mshrs.scala:36:7]
wire [1:0] io_req_uop_fp_ctrl_typeTagOut_0 = io_req_uop_fp_ctrl_typeTagOut; // @[mshrs.scala:36:7]
wire io_req_uop_fp_ctrl_fromint_0 = io_req_uop_fp_ctrl_fromint; // @[mshrs.scala:36:7]
wire io_req_uop_fp_ctrl_toint_0 = io_req_uop_fp_ctrl_toint; // @[mshrs.scala:36:7]
wire io_req_uop_fp_ctrl_fastpipe_0 = io_req_uop_fp_ctrl_fastpipe; // @[mshrs.scala:36:7]
wire io_req_uop_fp_ctrl_fma_0 = io_req_uop_fp_ctrl_fma; // @[mshrs.scala:36:7]
wire io_req_uop_fp_ctrl_div_0 = io_req_uop_fp_ctrl_div; // @[mshrs.scala:36:7]
wire io_req_uop_fp_ctrl_sqrt_0 = io_req_uop_fp_ctrl_sqrt; // @[mshrs.scala:36:7]
wire io_req_uop_fp_ctrl_wflags_0 = io_req_uop_fp_ctrl_wflags; // @[mshrs.scala:36:7]
wire io_req_uop_fp_ctrl_vec_0 = io_req_uop_fp_ctrl_vec; // @[mshrs.scala:36:7]
wire [4:0] io_req_uop_rob_idx_0 = io_req_uop_rob_idx; // @[mshrs.scala:36:7]
wire [3:0] io_req_uop_ldq_idx_0 = io_req_uop_ldq_idx; // @[mshrs.scala:36:7]
wire [3:0] io_req_uop_stq_idx_0 = io_req_uop_stq_idx; // @[mshrs.scala:36:7]
wire [1:0] io_req_uop_rxq_idx_0 = io_req_uop_rxq_idx; // @[mshrs.scala:36:7]
wire [5:0] io_req_uop_pdst_0 = io_req_uop_pdst; // @[mshrs.scala:36:7]
wire [5:0] io_req_uop_prs1_0 = io_req_uop_prs1; // @[mshrs.scala:36:7]
wire [5:0] io_req_uop_prs2_0 = io_req_uop_prs2; // @[mshrs.scala:36:7]
wire [5:0] io_req_uop_prs3_0 = io_req_uop_prs3; // @[mshrs.scala:36:7]
wire [3:0] io_req_uop_ppred_0 = io_req_uop_ppred; // @[mshrs.scala:36:7]
wire io_req_uop_prs1_busy_0 = io_req_uop_prs1_busy; // @[mshrs.scala:36:7]
wire io_req_uop_prs2_busy_0 = io_req_uop_prs2_busy; // @[mshrs.scala:36:7]
wire io_req_uop_prs3_busy_0 = io_req_uop_prs3_busy; // @[mshrs.scala:36:7]
wire io_req_uop_ppred_busy_0 = io_req_uop_ppred_busy; // @[mshrs.scala:36:7]
wire [5:0] io_req_uop_stale_pdst_0 = io_req_uop_stale_pdst; // @[mshrs.scala:36:7]
wire io_req_uop_exception_0 = io_req_uop_exception; // @[mshrs.scala:36:7]
wire [63:0] io_req_uop_exc_cause_0 = io_req_uop_exc_cause; // @[mshrs.scala:36:7]
wire [4:0] io_req_uop_mem_cmd_0 = io_req_uop_mem_cmd; // @[mshrs.scala:36:7]
wire [1:0] io_req_uop_mem_size_0 = io_req_uop_mem_size; // @[mshrs.scala:36:7]
wire io_req_uop_mem_signed_0 = io_req_uop_mem_signed; // @[mshrs.scala:36:7]
wire io_req_uop_uses_ldq_0 = io_req_uop_uses_ldq; // @[mshrs.scala:36:7]
wire io_req_uop_uses_stq_0 = io_req_uop_uses_stq; // @[mshrs.scala:36:7]
wire io_req_uop_is_unique_0 = io_req_uop_is_unique; // @[mshrs.scala:36:7]
wire io_req_uop_flush_on_commit_0 = io_req_uop_flush_on_commit; // @[mshrs.scala:36:7]
wire [2:0] io_req_uop_csr_cmd_0 = io_req_uop_csr_cmd; // @[mshrs.scala:36:7]
wire io_req_uop_ldst_is_rs1_0 = io_req_uop_ldst_is_rs1; // @[mshrs.scala:36:7]
wire [5:0] io_req_uop_ldst_0 = io_req_uop_ldst; // @[mshrs.scala:36:7]
wire [5:0] io_req_uop_lrs1_0 = io_req_uop_lrs1; // @[mshrs.scala:36:7]
wire [5:0] io_req_uop_lrs2_0 = io_req_uop_lrs2; // @[mshrs.scala:36:7]
wire [5:0] io_req_uop_lrs3_0 = io_req_uop_lrs3; // @[mshrs.scala:36:7]
wire [1:0] io_req_uop_dst_rtype_0 = io_req_uop_dst_rtype; // @[mshrs.scala:36:7]
wire [1:0] io_req_uop_lrs1_rtype_0 = io_req_uop_lrs1_rtype; // @[mshrs.scala:36:7]
wire [1:0] io_req_uop_lrs2_rtype_0 = io_req_uop_lrs2_rtype; // @[mshrs.scala:36:7]
wire io_req_uop_frs3_en_0 = io_req_uop_frs3_en; // @[mshrs.scala:36:7]
wire io_req_uop_fcn_dw_0 = io_req_uop_fcn_dw; // @[mshrs.scala:36:7]
wire [4:0] io_req_uop_fcn_op_0 = io_req_uop_fcn_op; // @[mshrs.scala:36:7]
wire io_req_uop_fp_val_0 = io_req_uop_fp_val; // @[mshrs.scala:36:7]
wire [2:0] io_req_uop_fp_rm_0 = io_req_uop_fp_rm; // @[mshrs.scala:36:7]
wire [1:0] io_req_uop_fp_typ_0 = io_req_uop_fp_typ; // @[mshrs.scala:36:7]
wire io_req_uop_xcpt_pf_if_0 = io_req_uop_xcpt_pf_if; // @[mshrs.scala:36:7]
wire io_req_uop_xcpt_ae_if_0 = io_req_uop_xcpt_ae_if; // @[mshrs.scala:36:7]
wire io_req_uop_xcpt_ma_if_0 = io_req_uop_xcpt_ma_if; // @[mshrs.scala:36:7]
wire io_req_uop_bp_debug_if_0 = io_req_uop_bp_debug_if; // @[mshrs.scala:36:7]
wire io_req_uop_bp_xcpt_if_0 = io_req_uop_bp_xcpt_if; // @[mshrs.scala:36:7]
wire [2:0] io_req_uop_debug_fsrc_0 = io_req_uop_debug_fsrc; // @[mshrs.scala:36:7]
wire [2:0] io_req_uop_debug_tsrc_0 = io_req_uop_debug_tsrc; // @[mshrs.scala:36:7]
wire [33:0] io_req_addr_0 = io_req_addr; // @[mshrs.scala:36:7]
wire [63:0] io_req_data_0 = io_req_data; // @[mshrs.scala:36:7]
wire io_req_is_hella_0 = io_req_is_hella; // @[mshrs.scala:36:7]
wire io_req_tag_match_0 = io_req_tag_match; // @[mshrs.scala:36:7]
wire [1:0] io_req_old_meta_coh_state_0 = io_req_old_meta_coh_state; // @[mshrs.scala:36:7]
wire [21:0] io_req_old_meta_tag_0 = io_req_old_meta_tag; // @[mshrs.scala:36:7]
wire [1:0] io_req_way_en_0 = io_req_way_en; // @[mshrs.scala:36:7]
wire [4:0] io_req_sdq_id_0 = io_req_sdq_id; // @[mshrs.scala:36:7]
wire io_req_is_probe_0 = io_req_is_probe; // @[mshrs.scala:36:7]
wire io_mem_acquire_ready_0 = io_mem_acquire_ready; // @[mshrs.scala:36:7]
wire io_mem_grant_valid_0 = io_mem_grant_valid; // @[mshrs.scala:36:7]
wire [2:0] io_mem_grant_bits_opcode_0 = io_mem_grant_bits_opcode; // @[mshrs.scala:36:7]
wire [1:0] io_mem_grant_bits_param_0 = io_mem_grant_bits_param; // @[mshrs.scala:36:7]
wire [3:0] io_mem_grant_bits_size_0 = io_mem_grant_bits_size; // @[mshrs.scala:36:7]
wire [3:0] io_mem_grant_bits_source_0 = io_mem_grant_bits_source; // @[mshrs.scala:36:7]
wire [2:0] io_mem_grant_bits_sink_0 = io_mem_grant_bits_sink; // @[mshrs.scala:36:7]
wire io_mem_grant_bits_denied_0 = io_mem_grant_bits_denied; // @[mshrs.scala:36:7]
wire [63:0] io_mem_grant_bits_data_0 = io_mem_grant_bits_data; // @[mshrs.scala:36:7]
wire io_mem_grant_bits_corrupt_0 = io_mem_grant_bits_corrupt; // @[mshrs.scala:36:7]
wire io_mem_finish_ready_0 = io_mem_finish_ready; // @[mshrs.scala:36:7]
wire io_prober_state_valid_0 = io_prober_state_valid; // @[mshrs.scala:36:7]
wire [33:0] io_prober_state_bits_0 = io_prober_state_bits; // @[mshrs.scala:36:7]
wire io_refill_ready_0 = io_refill_ready; // @[mshrs.scala:36:7]
wire io_meta_write_ready_0 = io_meta_write_ready; // @[mshrs.scala:36:7]
wire io_meta_read_ready_0 = io_meta_read_ready; // @[mshrs.scala:36:7]
wire io_meta_resp_valid_0 = io_meta_resp_valid; // @[mshrs.scala:36:7]
wire [1:0] io_meta_resp_bits_coh_state_0 = io_meta_resp_bits_coh_state; // @[mshrs.scala:36:7]
wire [21:0] io_meta_resp_bits_tag_0 = io_meta_resp_bits_tag; // @[mshrs.scala:36:7]
wire io_wb_req_ready_0 = io_wb_req_ready; // @[mshrs.scala:36:7]
wire [63:0] io_lb_resp_0 = io_lb_resp; // @[mshrs.scala:36:7]
wire io_replay_ready_0 = io_replay_ready; // @[mshrs.scala:36:7]
wire io_resp_ready_0 = io_resp_ready; // @[mshrs.scala:36:7]
wire io_wb_resp_0 = io_wb_resp; // @[mshrs.scala:36:7]
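// Note (added for readability): `reset` is sampled into helper wires on the next four lines;
// judging by the mshrs.scala:213/:220 source locators, these feed the state-machine assertions.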
wire _state_T = reset; // @[mshrs.scala:213:11]
wire _state_T_26 = reset; // @[mshrs.scala:220:15]
wire _state_T_34 = reset; // @[mshrs.scala:213:11]
wire _state_T_60 = reset; // @[mshrs.scala:220:15]
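// Note (added for readability): the long run of constant assignments that follows appears to be
// constant propagation. In this MSHR instance the branch-update and exception inputs are tied
// off, so their fields and the derived Metadata/Misc temporaries fold to fixed values.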
wire [2:0] io_id = 3'h1; // @[mshrs.scala:36:7]
wire [3:0] io_brupdate_b1_resolve_mask = 4'h0; // @[mshrs.scala:36:7]
wire [3:0] io_brupdate_b1_mispredict_mask = 4'h0; // @[mshrs.scala:36:7]
wire [3:0] io_brupdate_b2_uop_br_mask = 4'h0; // @[mshrs.scala:36:7]
wire [3:0] io_brupdate_b2_uop_br_type = 4'h0; // @[mshrs.scala:36:7]
wire [3:0] io_brupdate_b2_uop_ftq_idx = 4'h0; // @[mshrs.scala:36:7]
wire [3:0] io_brupdate_b2_uop_ldq_idx = 4'h0; // @[mshrs.scala:36:7]
wire [3:0] io_brupdate_b2_uop_stq_idx = 4'h0; // @[mshrs.scala:36:7]
wire [3:0] io_brupdate_b2_uop_ppred = 4'h0; // @[mshrs.scala:36:7]
wire [3:0] _r_T_10 = 4'h0; // @[Metadata.scala:125:10]
wire [3:0] _grow_param_r_T_16 = 4'h0; // @[Metadata.scala:68:10]
wire [3:0] _coh_on_grant_T_4 = 4'h0; // @[Metadata.scala:87:10]
wire [3:0] _r1_T_16 = 4'h0; // @[Metadata.scala:68:10]
wire [3:0] _r2_T_16 = 4'h0; // @[Metadata.scala:68:10]
wire [3:0] _state_req_needs_wb_r_T_10 = 4'h0; // @[Metadata.scala:125:10]
wire [3:0] _state_r_T_16 = 4'h0; // @[Metadata.scala:68:10]
wire [3:0] _needs_wb_r_T_10 = 4'h0; // @[Metadata.scala:125:10]
wire [3:0] _r_T_80 = 4'h0; // @[Metadata.scala:68:10]
wire [3:0] _r_T_139 = 4'h0; // @[Metadata.scala:68:10]
wire [3:0] _state_req_needs_wb_r_T_74 = 4'h0; // @[Metadata.scala:125:10]
wire [3:0] _state_r_T_75 = 4'h0; // @[Metadata.scala:68:10]
wire [31:0] io_brupdate_b2_uop_inst = 32'h0; // @[mshrs.scala:36:7]
wire [31:0] io_brupdate_b2_uop_debug_inst = 32'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_is_rvc = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_iq_type_0 = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_iq_type_1 = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_iq_type_2 = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_iq_type_3 = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fu_code_0 = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fu_code_1 = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fu_code_2 = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fu_code_3 = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fu_code_4 = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fu_code_5 = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fu_code_6 = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fu_code_7 = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fu_code_8 = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fu_code_9 = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_iw_issued = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_iw_issued_partial_agen = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_iw_issued_partial_dgen = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_iw_p1_speculative_child = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_iw_p2_speculative_child = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_iw_p1_bypass_hint = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_iw_p2_bypass_hint = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_iw_p3_bypass_hint = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_dis_col_sel = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_is_sfb = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_is_fence = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_is_fencei = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_is_sfence = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_is_amo = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_is_eret = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_is_sys_pc2epc = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_is_rocc = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_is_mov = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_edge_inst = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_taken = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_imm_rename = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fp_ctrl_ldst = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fp_ctrl_wen = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fp_ctrl_ren1 = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fp_ctrl_ren2 = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fp_ctrl_ren3 = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fp_ctrl_swap12 = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fp_ctrl_swap23 = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fp_ctrl_fromint = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fp_ctrl_toint = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fp_ctrl_fastpipe = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fp_ctrl_fma = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fp_ctrl_div = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fp_ctrl_sqrt = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fp_ctrl_wflags = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fp_ctrl_vec = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_prs1_busy = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_prs2_busy = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_prs3_busy = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_ppred_busy = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_exception = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_mem_signed = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_uses_ldq = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_uses_stq = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_is_unique = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_flush_on_commit = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_ldst_is_rs1 = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_frs3_en = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fcn_dw = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_fp_val = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_xcpt_pf_if = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_xcpt_ae_if = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_xcpt_ma_if = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_bp_debug_if = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_uop_bp_xcpt_if = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_mispredict = 1'h0; // @[mshrs.scala:36:7]
wire io_brupdate_b2_taken = 1'h0; // @[mshrs.scala:36:7]
wire io_exception = 1'h0; // @[mshrs.scala:36:7]
wire io_mem_acquire_bits_corrupt = 1'h0; // @[mshrs.scala:36:7]
wire _r_T_2 = 1'h0; // @[Metadata.scala:140:24]
wire _r_T_4 = 1'h0; // @[Metadata.scala:140:24]
wire _r_T_20 = 1'h0; // @[Misc.scala:38:9]
wire _r_T_24 = 1'h0; // @[Misc.scala:38:9]
wire _r_T_28 = 1'h0; // @[Misc.scala:38:9]
wire _grow_param_r_T_26 = 1'h0; // @[Misc.scala:35:9]
wire _grow_param_r_T_29 = 1'h0; // @[Misc.scala:35:9]
wire _grow_param_r_T_32 = 1'h0; // @[Misc.scala:35:9]
wire _grow_param_r_T_35 = 1'h0; // @[Misc.scala:35:9]
wire _grow_param_r_T_38 = 1'h0; // @[Misc.scala:35:9]
wire _r1_T_26 = 1'h0; // @[Misc.scala:35:9]
wire _r1_T_29 = 1'h0; // @[Misc.scala:35:9]
wire _r1_T_32 = 1'h0; // @[Misc.scala:35:9]
wire _r1_T_35 = 1'h0; // @[Misc.scala:35:9]
wire _r1_T_38 = 1'h0; // @[Misc.scala:35:9]
wire _r2_T_26 = 1'h0; // @[Misc.scala:35:9]
wire _r2_T_29 = 1'h0; // @[Misc.scala:35:9]
wire _r2_T_32 = 1'h0; // @[Misc.scala:35:9]
wire _r2_T_35 = 1'h0; // @[Misc.scala:35:9]
wire _r2_T_38 = 1'h0; // @[Misc.scala:35:9]
wire _io_mem_acquire_bits_legal_T = 1'h0; // @[Parameters.scala:684:29]
wire _io_mem_acquire_bits_legal_T_6 = 1'h0; // @[Parameters.scala:684:54]
wire _io_mem_acquire_bits_legal_T_15 = 1'h0; // @[Parameters.scala:686:26]
wire io_mem_acquire_bits_a_corrupt = 1'h0; // @[Edges.scala:346:17]
wire io_mem_acquire_bits_a_mask_sub_size = 1'h0; // @[Misc.scala:209:26]
wire _io_mem_acquire_bits_a_mask_sub_acc_T = 1'h0; // @[Misc.scala:215:38]
wire _io_mem_acquire_bits_a_mask_sub_acc_T_1 = 1'h0; // @[Misc.scala:215:38]
wire _io_mem_acquire_bits_a_mask_sub_acc_T_2 = 1'h0; // @[Misc.scala:215:38]
wire _io_mem_acquire_bits_a_mask_sub_acc_T_3 = 1'h0; // @[Misc.scala:215:38]
wire _state_req_needs_wb_r_T_2 = 1'h0; // @[Metadata.scala:140:24]
wire _state_req_needs_wb_r_T_4 = 1'h0; // @[Metadata.scala:140:24]
wire _state_req_needs_wb_r_T_20 = 1'h0; // @[Misc.scala:38:9]
wire _state_req_needs_wb_r_T_24 = 1'h0; // @[Misc.scala:38:9]
wire _state_req_needs_wb_r_T_28 = 1'h0; // @[Misc.scala:38:9]
wire _state_r_T_26 = 1'h0; // @[Misc.scala:35:9]
wire _state_r_T_29 = 1'h0; // @[Misc.scala:35:9]
wire _state_r_T_32 = 1'h0; // @[Misc.scala:35:9]
wire _state_r_T_35 = 1'h0; // @[Misc.scala:35:9]
wire _state_r_T_38 = 1'h0; // @[Misc.scala:35:9]
wire io_resp_bits_data_doZero = 1'h0; // @[AMOALU.scala:43:31]
wire io_resp_bits_data_doZero_1 = 1'h0; // @[AMOALU.scala:43:31]
wire io_resp_bits_data_doZero_2 = 1'h0; // @[AMOALU.scala:43:31]
wire _needs_wb_r_T_2 = 1'h0; // @[Metadata.scala:140:24]
wire _needs_wb_r_T_4 = 1'h0; // @[Metadata.scala:140:24]
wire _needs_wb_r_T_20 = 1'h0; // @[Misc.scala:38:9]
wire _needs_wb_r_T_24 = 1'h0; // @[Misc.scala:38:9]
wire _needs_wb_r_T_28 = 1'h0; // @[Misc.scala:38:9]
wire _r_T_90 = 1'h0; // @[Misc.scala:35:9]
wire _r_T_93 = 1'h0; // @[Misc.scala:35:9]
wire _r_T_96 = 1'h0; // @[Misc.scala:35:9]
wire _r_T_99 = 1'h0; // @[Misc.scala:35:9]
wire _r_T_102 = 1'h0; // @[Misc.scala:35:9]
wire _r_T_149 = 1'h0; // @[Misc.scala:35:9]
wire _r_T_152 = 1'h0; // @[Misc.scala:35:9]
wire _r_T_155 = 1'h0; // @[Misc.scala:35:9]
wire _r_T_158 = 1'h0; // @[Misc.scala:35:9]
wire _r_T_161 = 1'h0; // @[Misc.scala:35:9]
wire _state_req_needs_wb_r_T_66 = 1'h0; // @[Metadata.scala:140:24]
wire _state_req_needs_wb_r_T_68 = 1'h0; // @[Metadata.scala:140:24]
wire _state_req_needs_wb_r_T_84 = 1'h0; // @[Misc.scala:38:9]
wire _state_req_needs_wb_r_T_88 = 1'h0; // @[Misc.scala:38:9]
wire _state_req_needs_wb_r_T_92 = 1'h0; // @[Misc.scala:38:9]
wire _state_r_T_85 = 1'h0; // @[Misc.scala:35:9]
wire _state_r_T_88 = 1'h0; // @[Misc.scala:35:9]
wire _state_r_T_91 = 1'h0; // @[Misc.scala:35:9]
wire _state_r_T_94 = 1'h0; // @[Misc.scala:35:9]
wire _state_r_T_97 = 1'h0; // @[Misc.scala:35:9]
wire [33:0] io_brupdate_b2_uop_debug_pc = 34'h0; // @[mshrs.scala:36:7]
wire [33:0] io_brupdate_b2_jalr_target = 34'h0; // @[mshrs.scala:36:7]
wire [1:0] io_brupdate_b2_uop_br_tag = 2'h0; // @[mshrs.scala:36:7]
wire [1:0] io_brupdate_b2_uop_op1_sel = 2'h0; // @[mshrs.scala:36:7]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagIn = 2'h0; // @[mshrs.scala:36:7]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagOut = 2'h0; // @[mshrs.scala:36:7]
wire [1:0] io_brupdate_b2_uop_rxq_idx = 2'h0; // @[mshrs.scala:36:7]
wire [1:0] io_brupdate_b2_uop_mem_size = 2'h0; // @[mshrs.scala:36:7]
wire [1:0] io_brupdate_b2_uop_dst_rtype = 2'h0; // @[mshrs.scala:36:7]
wire [1:0] io_brupdate_b2_uop_lrs1_rtype = 2'h0; // @[mshrs.scala:36:7]
wire [1:0] io_brupdate_b2_uop_lrs2_rtype = 2'h0; // @[mshrs.scala:36:7]
wire [1:0] io_brupdate_b2_uop_fp_typ = 2'h0; // @[mshrs.scala:36:7]
wire [1:0] io_brupdate_b2_pc_sel = 2'h0; // @[mshrs.scala:36:7]
wire [1:0] new_coh_meta_state = 2'h0; // @[Metadata.scala:160:20]
wire [1:0] _r_T_22 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _r_T_26 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _r_T_30 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _r_T_34 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _r_T_38 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _grow_param_r_T_1 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _grow_param_r_T_3 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _grow_param_r_T_5 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _grow_param_r_T_15 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _coh_on_grant_T_1 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _coh_on_grant_T_3 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _r1_T_1 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _r1_T_3 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _r1_T_5 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _r1_T_15 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _r2_T_1 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _r2_T_3 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _r2_T_5 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _r2_T_15 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _state_req_needs_wb_r_T_22 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _state_req_needs_wb_r_T_26 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _state_req_needs_wb_r_T_30 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _state_req_needs_wb_r_T_34 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _state_req_needs_wb_r_T_38 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _state_r_T_1 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _state_r_T_3 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _state_r_T_5 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _state_r_T_15 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] state_new_coh_meta_state = 2'h0; // @[Metadata.scala:160:20]
wire [1:0] _needs_wb_r_T_22 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _needs_wb_r_T_26 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _needs_wb_r_T_30 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _needs_wb_r_T_34 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _needs_wb_r_T_38 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _r_T_65 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _r_T_67 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _r_T_69 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _r_T_79 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _r_T_124 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _r_T_126 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _r_T_128 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _r_T_138 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] new_coh_meta_1_state = 2'h0; // @[Metadata.scala:160:20]
wire [1:0] _state_req_needs_wb_r_T_86 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _state_req_needs_wb_r_T_90 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _state_req_needs_wb_r_T_94 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _state_req_needs_wb_r_T_98 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _state_req_needs_wb_r_T_102 = 2'h0; // @[Misc.scala:38:63]
wire [1:0] _state_r_T_60 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _state_r_T_62 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _state_r_T_64 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] _state_r_T_74 = 2'h0; // @[Metadata.scala:26:15]
wire [1:0] state_new_coh_meta_1_state = 2'h0; // @[Metadata.scala:160:20]
wire [5:0] io_brupdate_b2_uop_pc_lob = 6'h0; // @[mshrs.scala:36:7]
wire [5:0] io_brupdate_b2_uop_pdst = 6'h0; // @[mshrs.scala:36:7]
wire [5:0] io_brupdate_b2_uop_prs1 = 6'h0; // @[mshrs.scala:36:7]
wire [5:0] io_brupdate_b2_uop_prs2 = 6'h0; // @[mshrs.scala:36:7]
wire [5:0] io_brupdate_b2_uop_prs3 = 6'h0; // @[mshrs.scala:36:7]
wire [5:0] io_brupdate_b2_uop_stale_pdst = 6'h0; // @[mshrs.scala:36:7]
wire [5:0] io_brupdate_b2_uop_ldst = 6'h0; // @[mshrs.scala:36:7]
wire [5:0] io_brupdate_b2_uop_lrs1 = 6'h0; // @[mshrs.scala:36:7]
wire [5:0] io_brupdate_b2_uop_lrs2 = 6'h0; // @[mshrs.scala:36:7]
wire [5:0] io_brupdate_b2_uop_lrs3 = 6'h0; // @[mshrs.scala:36:7]
wire [2:0] io_brupdate_b2_uop_imm_sel = 3'h0; // @[mshrs.scala:36:7]
wire [2:0] io_brupdate_b2_uop_op2_sel = 3'h0; // @[mshrs.scala:36:7]
wire [2:0] io_brupdate_b2_uop_csr_cmd = 3'h0; // @[mshrs.scala:36:7]
wire [2:0] io_brupdate_b2_uop_fp_rm = 3'h0; // @[mshrs.scala:36:7]
wire [2:0] io_brupdate_b2_uop_debug_fsrc = 3'h0; // @[mshrs.scala:36:7]
wire [2:0] io_brupdate_b2_uop_debug_tsrc = 3'h0; // @[mshrs.scala:36:7]
wire [2:0] io_brupdate_b2_cfi_type = 3'h0; // @[mshrs.scala:36:7]
wire [4:0] io_brupdate_b2_uop_pimm = 5'h0; // @[mshrs.scala:36:7]
wire [4:0] io_brupdate_b2_uop_rob_idx = 5'h0; // @[mshrs.scala:36:7]
wire [4:0] io_brupdate_b2_uop_mem_cmd = 5'h0; // @[mshrs.scala:36:7]
wire [4:0] io_brupdate_b2_uop_fcn_op = 5'h0; // @[mshrs.scala:36:7]
wire [19:0] io_brupdate_b2_uop_imm_packed = 20'h0; // @[mshrs.scala:36:7]
wire [63:0] io_brupdate_b2_uop_exc_cause = 64'h0; // @[mshrs.scala:36:7]
wire [63:0] io_mem_acquire_bits_data = 64'h0; // @[mshrs.scala:36:7]
wire [63:0] io_mem_acquire_bits_a_data = 64'h0; // @[Edges.scala:346:17]
wire [20:0] io_brupdate_b2_target_offset = 21'h0; // @[mshrs.scala:36:7]
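// Note (added for readability): the fixed TileLink Acquire fields for this configuration follow:
// opcode 3'h6 (AcquireBlock), size 4'h6 (a 2^6 = 64-byte block), and a full 8'hFF beat mask.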
wire [2:0] io_mem_acquire_bits_opcode = 3'h6; // @[mshrs.scala:36:7]
wire [2:0] io_mem_acquire_bits_a_opcode = 3'h6; // @[Edges.scala:346:17]
wire [2:0] _io_mem_acquire_bits_a_mask_sizeOH_T = 3'h6; // @[Misc.scala:202:34]
wire [3:0] io_mem_acquire_bits_size = 4'h6; // @[mshrs.scala:36:7]
wire [3:0] _r_T_12 = 4'h6; // @[Metadata.scala:127:10]
wire [3:0] _grow_param_r_T_10 = 4'h6; // @[Metadata.scala:64:10]
wire [3:0] _r1_T_10 = 4'h6; // @[Metadata.scala:64:10]
wire [3:0] _r2_T_10 = 4'h6; // @[Metadata.scala:64:10]
wire [3:0] io_mem_acquire_bits_a_size = 4'h6; // @[Edges.scala:346:17]
wire [3:0] _state_req_needs_wb_r_T_12 = 4'h6; // @[Metadata.scala:127:10]
wire [3:0] _state_r_T_10 = 4'h6; // @[Metadata.scala:64:10]
wire [3:0] _needs_wb_r_T_12 = 4'h6; // @[Metadata.scala:127:10]
wire [3:0] _r_T_74 = 4'h6; // @[Metadata.scala:64:10]
wire [3:0] _r_T_133 = 4'h6; // @[Metadata.scala:64:10]
wire [3:0] _state_req_needs_wb_r_T_76 = 4'h6; // @[Metadata.scala:127:10]
wire [3:0] _state_r_T_69 = 4'h6; // @[Metadata.scala:64:10]
wire [3:0] io_mem_acquire_bits_source = 4'h1; // @[mshrs.scala:36:7]
wire [3:0] io_wb_req_bits_source = 4'h1; // @[mshrs.scala:36:7]
wire [3:0] _r_T_9 = 4'h1; // @[Metadata.scala:124:10]
wire [3:0] _grow_param_r_T_6 = 4'h1; // @[Metadata.scala:62:10]
wire [3:0] _coh_on_grant_T_2 = 4'h1; // @[Metadata.scala:86:10]
wire [3:0] _r1_T_6 = 4'h1; // @[Metadata.scala:62:10]
wire [3:0] _r2_T_6 = 4'h1; // @[Metadata.scala:62:10]
wire [3:0] io_mem_acquire_bits_a_source = 4'h1; // @[Edges.scala:346:17]
wire [3:0] _state_req_needs_wb_r_T_9 = 4'h1; // @[Metadata.scala:124:10]
wire [3:0] _state_r_T_6 = 4'h1; // @[Metadata.scala:62:10]
wire [3:0] _needs_wb_r_T_9 = 4'h1; // @[Metadata.scala:124:10]
wire [3:0] _r_T_70 = 4'h1; // @[Metadata.scala:62:10]
wire [3:0] _r_T_129 = 4'h1; // @[Metadata.scala:62:10]
wire [3:0] _state_req_needs_wb_r_T_73 = 4'h1; // @[Metadata.scala:124:10]
wire [3:0] _state_r_T_65 = 4'h1; // @[Metadata.scala:62:10]
wire [7:0] io_mem_acquire_bits_mask = 8'hFF; // @[mshrs.scala:36:7]
wire [7:0] io_mem_acquire_bits_a_mask = 8'hFF; // @[Edges.scala:346:17]
wire [7:0] _io_mem_acquire_bits_a_mask_T = 8'hFF; // @[Misc.scala:222:10]
wire io_refill_bits_wmask = 1'h1; // @[mshrs.scala:36:7]
wire io_wb_req_bits_voluntary = 1'h1; // @[mshrs.scala:36:7]
wire _r_T = 1'h1; // @[Metadata.scala:140:24]
wire _io_mem_acquire_bits_legal_T_7 = 1'h1; // @[Parameters.scala:91:44]
wire _io_mem_acquire_bits_legal_T_8 = 1'h1; // @[Parameters.scala:684:29]
wire io_mem_acquire_bits_a_mask_sub_sub_sub_0_1 = 1'h1; // @[Misc.scala:206:21]
wire io_mem_acquire_bits_a_mask_sub_sub_size = 1'h1; // @[Misc.scala:209:26]
wire io_mem_acquire_bits_a_mask_sub_sub_0_1 = 1'h1; // @[Misc.scala:215:29]
wire io_mem_acquire_bits_a_mask_sub_sub_1_1 = 1'h1; // @[Misc.scala:215:29]
wire io_mem_acquire_bits_a_mask_sub_0_1 = 1'h1; // @[Misc.scala:215:29]
wire io_mem_acquire_bits_a_mask_sub_1_1 = 1'h1; // @[Misc.scala:215:29]
wire io_mem_acquire_bits_a_mask_sub_2_1 = 1'h1; // @[Misc.scala:215:29]
wire io_mem_acquire_bits_a_mask_sub_3_1 = 1'h1; // @[Misc.scala:215:29]
wire io_mem_acquire_bits_a_mask_size = 1'h1; // @[Misc.scala:209:26]
wire io_mem_acquire_bits_a_mask_acc = 1'h1; // @[Misc.scala:215:29]
wire io_mem_acquire_bits_a_mask_acc_1 = 1'h1; // @[Misc.scala:215:29]
wire io_mem_acquire_bits_a_mask_acc_2 = 1'h1; // @[Misc.scala:215:29]
wire io_mem_acquire_bits_a_mask_acc_3 = 1'h1; // @[Misc.scala:215:29]
wire io_mem_acquire_bits_a_mask_acc_4 = 1'h1; // @[Misc.scala:215:29]
wire io_mem_acquire_bits_a_mask_acc_5 = 1'h1; // @[Misc.scala:215:29]
wire io_mem_acquire_bits_a_mask_acc_6 = 1'h1; // @[Misc.scala:215:29]
wire io_mem_acquire_bits_a_mask_acc_7 = 1'h1; // @[Misc.scala:215:29]
wire _io_refill_bits_wmask_T = 1'h1; // @[mshrs.scala:174:28]
wire _state_req_needs_wb_r_T = 1'h1; // @[Metadata.scala:140:24]
wire _needs_wb_r_T = 1'h1; // @[Metadata.scala:140:24]
wire _state_req_needs_wb_r_T_64 = 1'h1; // @[Metadata.scala:140:24]
wire [3:0] _grow_param_r_T_24 = 4'hC; // @[Metadata.scala:72:10]
wire [3:0] _coh_on_grant_T_8 = 4'hC; // @[Metadata.scala:89:10]
wire [3:0] _r1_T_24 = 4'hC; // @[Metadata.scala:72:10]
wire [3:0] _r2_T_24 = 4'hC; // @[Metadata.scala:72:10]
wire [3:0] _state_r_T_24 = 4'hC; // @[Metadata.scala:72:10]
wire [3:0] _r_T_88 = 4'hC; // @[Metadata.scala:72:10]
wire [3:0] _r_T_147 = 4'hC; // @[Metadata.scala:72:10]
wire [3:0] _state_r_T_83 = 4'hC; // @[Metadata.scala:72:10]
wire [1:0] _grow_param_r_T_11 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _grow_param_r_T_13 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _grow_param_r_T_21 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _grow_param_r_T_23 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _coh_on_grant_T_7 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _r1_T_11 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _r1_T_13 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _r1_T_21 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _r1_T_23 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _r2_T_11 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _r2_T_13 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _r2_T_21 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _r2_T_23 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _dirties_T = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] io_mem_acquire_bits_a_mask_lo_lo = 2'h3; // @[Misc.scala:222:10]
wire [1:0] io_mem_acquire_bits_a_mask_lo_hi = 2'h3; // @[Misc.scala:222:10]
wire [1:0] io_mem_acquire_bits_a_mask_hi_lo = 2'h3; // @[Misc.scala:222:10]
wire [1:0] io_mem_acquire_bits_a_mask_hi_hi = 2'h3; // @[Misc.scala:222:10]
wire [1:0] _state_r_T_11 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _state_r_T_13 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _state_r_T_21 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _state_r_T_23 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _r_T_75 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _r_T_77 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _r_T_85 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _r_T_87 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _r_T_134 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _r_T_136 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _r_T_144 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _r_T_146 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _state_r_T_70 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _state_r_T_72 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _state_r_T_80 = 2'h3; // @[Metadata.scala:24:15]
wire [1:0] _state_r_T_82 = 2'h3; // @[Metadata.scala:24:15]
wire [3:0] _grow_param_r_T_22 = 4'hD; // @[Metadata.scala:71:10]
wire [3:0] _r1_T_22 = 4'hD; // @[Metadata.scala:71:10]
wire [3:0] _r2_T_22 = 4'hD; // @[Metadata.scala:71:10]
wire [3:0] _state_r_T_22 = 4'hD; // @[Metadata.scala:71:10]
wire [3:0] _r_T_86 = 4'hD; // @[Metadata.scala:71:10]
wire [3:0] _r_T_145 = 4'hD; // @[Metadata.scala:71:10]
wire [3:0] _state_r_T_81 = 4'hD; // @[Metadata.scala:71:10]
wire [3:0] _r_T_14 = 4'h4; // @[Metadata.scala:129:10]
wire [3:0] _grow_param_r_T_20 = 4'h4; // @[Metadata.scala:70:10]
wire [3:0] _coh_on_grant_T_6 = 4'h4; // @[Metadata.scala:88:10]
wire [3:0] _r1_T_20 = 4'h4; // @[Metadata.scala:70:10]
wire [3:0] _r2_T_20 = 4'h4; // @[Metadata.scala:70:10]
wire [3:0] _io_mem_acquire_bits_a_mask_sizeOH_T_1 = 4'h4; // @[OneHot.scala:65:12]
wire [3:0] _state_req_needs_wb_r_T_14 = 4'h4; // @[Metadata.scala:129:10]
wire [3:0] _state_r_T_20 = 4'h4; // @[Metadata.scala:70:10]
wire [3:0] _needs_wb_r_T_14 = 4'h4; // @[Metadata.scala:129:10]
wire [3:0] _r_T_84 = 4'h4; // @[Metadata.scala:70:10]
wire [3:0] _r_T_143 = 4'h4; // @[Metadata.scala:70:10]
wire [3:0] _state_req_needs_wb_r_T_78 = 4'h4; // @[Metadata.scala:129:10]
wire [3:0] _state_r_T_79 = 4'h4; // @[Metadata.scala:70:10]
wire [3:0] _r_T_13 = 4'h5; // @[Metadata.scala:128:10]
wire [3:0] _grow_param_r_T_18 = 4'h5; // @[Metadata.scala:69:10]
wire [3:0] _r1_T_18 = 4'h5; // @[Metadata.scala:69:10]
wire [3:0] _r2_T_18 = 4'h5; // @[Metadata.scala:69:10]
wire [3:0] _state_req_needs_wb_r_T_13 = 4'h5; // @[Metadata.scala:128:10]
wire [3:0] _state_r_T_18 = 4'h5; // @[Metadata.scala:69:10]
wire [3:0] _needs_wb_r_T_13 = 4'h5; // @[Metadata.scala:128:10]
wire [3:0] _r_T_82 = 4'h5; // @[Metadata.scala:69:10]
wire [3:0] _r_T_141 = 4'h5; // @[Metadata.scala:69:10]
wire [3:0] _state_req_needs_wb_r_T_77 = 4'h5; // @[Metadata.scala:128:10]
wire [3:0] _state_r_T_77 = 4'h5; // @[Metadata.scala:69:10]
wire [3:0] _grow_param_r_T_14 = 4'hE; // @[Metadata.scala:66:10]
wire [3:0] _r1_T_14 = 4'hE; // @[Metadata.scala:66:10]
wire [3:0] _r2_T_14 = 4'hE; // @[Metadata.scala:66:10]
wire [3:0] _state_r_T_14 = 4'hE; // @[Metadata.scala:66:10]
wire [3:0] _r_T_78 = 4'hE; // @[Metadata.scala:66:10]
wire [3:0] _r_T_137 = 4'hE; // @[Metadata.scala:66:10]
wire [3:0] _state_r_T_73 = 4'hE; // @[Metadata.scala:66:10]
wire [3:0] _grow_param_r_T_12 = 4'hF; // @[Metadata.scala:65:10]
wire [3:0] _r1_T_12 = 4'hF; // @[Metadata.scala:65:10]
wire [3:0] _r2_T_12 = 4'hF; // @[Metadata.scala:65:10]
wire [3:0] io_mem_acquire_bits_a_mask_lo = 4'hF; // @[Misc.scala:222:10]
wire [3:0] io_mem_acquire_bits_a_mask_hi = 4'hF; // @[Misc.scala:222:10]
wire [3:0] _state_r_T_12 = 4'hF; // @[Metadata.scala:65:10]
wire [3:0] _r_T_76 = 4'hF; // @[Metadata.scala:65:10]
wire [3:0] _r_T_135 = 4'hF; // @[Metadata.scala:65:10]
wire [3:0] _state_r_T_71 = 4'hF; // @[Metadata.scala:65:10]
wire [3:0] _r_T_11 = 4'h7; // @[Metadata.scala:126:10]
wire [3:0] _grow_param_r_T_8 = 4'h7; // @[Metadata.scala:63:10]
wire [3:0] _r1_T_8 = 4'h7; // @[Metadata.scala:63:10]
wire [3:0] _r2_T_8 = 4'h7; // @[Metadata.scala:63:10]
wire [3:0] _state_req_needs_wb_r_T_11 = 4'h7; // @[Metadata.scala:126:10]
wire [3:0] _state_r_T_8 = 4'h7; // @[Metadata.scala:63:10]
wire [3:0] _needs_wb_r_T_11 = 4'h7; // @[Metadata.scala:126:10]
wire [3:0] _r_T_72 = 4'h7; // @[Metadata.scala:63:10]
wire [3:0] _r_T_131 = 4'h7; // @[Metadata.scala:63:10]
wire [3:0] _state_req_needs_wb_r_T_75 = 4'h7; // @[Metadata.scala:126:10]
wire [3:0] _state_r_T_67 = 4'h7; // @[Metadata.scala:63:10]
wire [3:0] _r_T_8 = 4'h2; // @[Metadata.scala:123:10]
wire [3:0] _grow_param_r_T_4 = 4'h2; // @[Metadata.scala:61:10]
wire [3:0] _r1_T_4 = 4'h2; // @[Metadata.scala:61:10]
wire [3:0] _r2_T_4 = 4'h2; // @[Metadata.scala:61:10]
wire [3:0] _state_req_needs_wb_r_T_8 = 4'h2; // @[Metadata.scala:123:10]
wire [3:0] _state_r_T_4 = 4'h2; // @[Metadata.scala:61:10]
wire [3:0] _needs_wb_r_T_8 = 4'h2; // @[Metadata.scala:123:10]
wire [3:0] _r_T_68 = 4'h2; // @[Metadata.scala:61:10]
wire [3:0] _r_T_127 = 4'h2; // @[Metadata.scala:61:10]
wire [3:0] _state_req_needs_wb_r_T_72 = 4'h2; // @[Metadata.scala:123:10]
wire [3:0] _state_r_T_63 = 4'h2; // @[Metadata.scala:61:10]
wire [3:0] _r_T_7 = 4'h3; // @[Metadata.scala:122:10]
wire [3:0] _grow_param_r_T_2 = 4'h3; // @[Metadata.scala:60:10]
wire [3:0] _r1_T_2 = 4'h3; // @[Metadata.scala:60:10]
wire [3:0] _r2_T_2 = 4'h3; // @[Metadata.scala:60:10]
wire [3:0] _state_req_needs_wb_r_T_7 = 4'h3; // @[Metadata.scala:122:10]
wire [3:0] _state_r_T_2 = 4'h3; // @[Metadata.scala:60:10]
wire [3:0] _needs_wb_r_T_7 = 4'h3; // @[Metadata.scala:122:10]
wire [3:0] _r_T_66 = 4'h3; // @[Metadata.scala:60:10]
wire [3:0] _r_T_125 = 4'h3; // @[Metadata.scala:60:10]
wire [3:0] _state_req_needs_wb_r_T_71 = 4'h3; // @[Metadata.scala:122:10]
wire [3:0] _state_r_T_61 = 4'h3; // @[Metadata.scala:60:10]
wire [3:0] _r_T_18 = 4'h8; // @[Metadata.scala:133:10]
wire [3:0] _state_req_needs_wb_r_T_18 = 4'h8; // @[Metadata.scala:133:10]
wire [3:0] _needs_wb_r_T_18 = 4'h8; // @[Metadata.scala:133:10]
wire [3:0] _state_req_needs_wb_r_T_82 = 4'h8; // @[Metadata.scala:133:10]
wire [3:0] _r_T_17 = 4'h9; // @[Metadata.scala:132:10]
wire [3:0] _state_req_needs_wb_r_T_17 = 4'h9; // @[Metadata.scala:132:10]
wire [3:0] _needs_wb_r_T_17 = 4'h9; // @[Metadata.scala:132:10]
wire [3:0] _state_req_needs_wb_r_T_81 = 4'h9; // @[Metadata.scala:132:10]
wire [3:0] _r_T_16 = 4'hA; // @[Metadata.scala:131:10]
wire [3:0] _state_req_needs_wb_r_T_16 = 4'hA; // @[Metadata.scala:131:10]
wire [3:0] _needs_wb_r_T_16 = 4'hA; // @[Metadata.scala:131:10]
wire [3:0] _state_req_needs_wb_r_T_80 = 4'hA; // @[Metadata.scala:131:10]
wire [3:0] _r_T_15 = 4'hB; // @[Metadata.scala:130:10]
wire [3:0] _state_req_needs_wb_r_T_15 = 4'hB; // @[Metadata.scala:130:10]
wire [3:0] _needs_wb_r_T_15 = 4'hB; // @[Metadata.scala:130:10]
wire [3:0] _state_req_needs_wb_r_T_79 = 4'hB; // @[Metadata.scala:130:10]
wire [1:0] _r_T_1 = 2'h2; // @[Metadata.scala:140:24]
wire [1:0] _r_T_3 = 2'h2; // @[Metadata.scala:140:24]
wire [1:0] _r_T_5 = 2'h2; // @[Metadata.scala:140:24]
wire [1:0] io_mem_acquire_bits_a_mask_sizeOH_shiftAmount = 2'h2; // @[OneHot.scala:64:49]
wire [1:0] _state_req_needs_wb_r_T_1 = 2'h2; // @[Metadata.scala:140:24]
wire [1:0] _state_req_needs_wb_r_T_3 = 2'h2; // @[Metadata.scala:140:24]
wire [1:0] _state_req_needs_wb_r_T_5 = 2'h2; // @[Metadata.scala:140:24]
wire [1:0] _needs_wb_r_T_1 = 2'h2; // @[Metadata.scala:140:24]
wire [1:0] _needs_wb_r_T_3 = 2'h2; // @[Metadata.scala:140:24]
wire [1:0] _needs_wb_r_T_5 = 2'h2; // @[Metadata.scala:140:24]
wire [1:0] _state_req_needs_wb_r_T_65 = 2'h2; // @[Metadata.scala:140:24]
wire [1:0] _state_req_needs_wb_r_T_67 = 2'h2; // @[Metadata.scala:140:24]
wire [1:0] _state_req_needs_wb_r_T_69 = 2'h2; // @[Metadata.scala:140:24]
wire [6:0] _data_word_T = 7'h0; // @[mshrs.scala:274:32]
wire [2:0] io_mem_acquire_bits_a_mask_sizeOH = 3'h5; // @[Misc.scala:202:81]
wire [2:0] _io_mem_acquire_bits_a_mask_sizeOH_T_2 = 3'h4; // @[OneHot.scala:65:27]
wire [1:0] _grow_param_r_T_7 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _grow_param_r_T_9 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _grow_param_r_T_17 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _grow_param_r_T_19 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _coh_on_grant_T_5 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _r1_T_7 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _r1_T_9 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _r1_T_17 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _r1_T_19 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _r2_T_7 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _r2_T_9 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _r2_T_17 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _r2_T_19 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _state_r_T_7 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _state_r_T_9 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _state_r_T_17 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _state_r_T_19 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _r_T_71 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _r_T_73 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _r_T_81 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _r_T_83 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _r_T_130 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _r_T_132 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _r_T_140 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _r_T_142 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _state_r_T_66 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _state_r_T_68 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _state_r_T_76 = 2'h1; // @[Metadata.scala:25:15]
wire [1:0] _state_r_T_78 = 2'h1; // @[Metadata.scala:25:15]
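// Note (added for readability): the declarations below are forward declarations of internal
// control terms and `*_0` output staging wires; apart from the few with inline assignments,
// they are driven further down in the module body.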
wire _io_req_sec_rdy_T; // @[mshrs.scala:163:37]
wire _io_idx_valid_T; // @[mshrs.scala:149:25]
wire [3:0] req_idx; // @[mshrs.scala:110:25]
wire _io_way_valid_T_3; // @[mshrs.scala:151:19]
wire _io_tag_valid_T; // @[mshrs.scala:150:25]
wire [23:0] req_tag; // @[mshrs.scala:111:26]
wire [2:0] io_mem_acquire_bits_a_param; // @[Edges.scala:346:17]
wire [31:0] io_mem_acquire_bits_a_address; // @[Edges.scala:346:17]
wire [2:0] grantack_bits_e_sink = io_mem_grant_bits_sink_0; // @[Edges.scala:451:17]
wire [63:0] io_lb_write_bits_data_0 = io_mem_grant_bits_data_0; // @[mshrs.scala:36:7]
wire [2:0] shrink_param; // @[Misc.scala:38:36]
wire [1:0] coh_on_grant_state; // @[Metadata.scala:160:20]
wire [63:0] io_refill_bits_data_0 = io_lb_resp_0; // @[mshrs.scala:36:7]
wire [63:0] data_word = io_lb_resp_0; // @[mshrs.scala:36:7, :274:26]
wire _io_probe_rdy_T_11; // @[mshrs.scala:148:42]
wire io_idx_valid_0; // @[mshrs.scala:36:7]
wire [3:0] io_idx_bits_0; // @[mshrs.scala:36:7]
wire io_way_valid_0; // @[mshrs.scala:36:7]
wire [1:0] io_way_bits_0; // @[mshrs.scala:36:7]
wire io_tag_valid_0; // @[mshrs.scala:36:7]
wire [23:0] io_tag_bits_0; // @[mshrs.scala:36:7]
wire [2:0] io_mem_acquire_bits_param_0; // @[mshrs.scala:36:7]
wire [31:0] io_mem_acquire_bits_address_0; // @[mshrs.scala:36:7]
wire io_mem_acquire_valid_0; // @[mshrs.scala:36:7]
wire io_mem_grant_ready_0; // @[mshrs.scala:36:7]
wire [2:0] io_mem_finish_bits_sink_0; // @[mshrs.scala:36:7]
wire io_mem_finish_valid_0; // @[mshrs.scala:36:7]
wire [1:0] io_refill_bits_way_en_0; // @[mshrs.scala:36:7]
wire [9:0] io_refill_bits_addr_0; // @[mshrs.scala:36:7]
wire io_refill_valid_0; // @[mshrs.scala:36:7]
wire [1:0] io_meta_write_bits_data_coh_state_0; // @[mshrs.scala:36:7]
wire [21:0] io_meta_write_bits_data_tag_0; // @[mshrs.scala:36:7]
wire [3:0] io_meta_write_bits_idx_0; // @[mshrs.scala:36:7]
wire [1:0] io_meta_write_bits_way_en_0; // @[mshrs.scala:36:7]
wire [21:0] io_meta_write_bits_tag_0; // @[mshrs.scala:36:7]
wire io_meta_write_valid_0; // @[mshrs.scala:36:7]
wire [3:0] io_meta_read_bits_idx_0; // @[mshrs.scala:36:7]
wire [1:0] io_meta_read_bits_way_en_0; // @[mshrs.scala:36:7]
wire [21:0] io_meta_read_bits_tag_0; // @[mshrs.scala:36:7]
wire io_meta_read_valid_0; // @[mshrs.scala:36:7]
wire [21:0] io_wb_req_bits_tag_0; // @[mshrs.scala:36:7]
wire [3:0] io_wb_req_bits_idx_0; // @[mshrs.scala:36:7]
wire [2:0] io_wb_req_bits_param_0; // @[mshrs.scala:36:7]
wire [1:0] io_wb_req_bits_way_en_0; // @[mshrs.scala:36:7]
wire io_wb_req_valid_0; // @[mshrs.scala:36:7]
wire [1:0] io_commit_coh_state_0; // @[mshrs.scala:36:7]
wire [2:0] io_lb_read_offset_0; // @[mshrs.scala:36:7]
wire [2:0] io_lb_write_bits_offset_0; // @[mshrs.scala:36:7]
wire io_lb_write_valid_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_iq_type_0_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_iq_type_1_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_iq_type_2_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_iq_type_3_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fu_code_0_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fu_code_1_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fu_code_2_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fu_code_3_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fu_code_4_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fu_code_5_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fu_code_6_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fu_code_7_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fu_code_8_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fu_code_9_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fp_ctrl_ldst_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fp_ctrl_wen_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fp_ctrl_ren1_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fp_ctrl_ren2_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fp_ctrl_ren3_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fp_ctrl_swap12_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fp_ctrl_swap23_0; // @[mshrs.scala:36:7]
wire [1:0] io_replay_bits_uop_fp_ctrl_typeTagIn_0; // @[mshrs.scala:36:7]
wire [1:0] io_replay_bits_uop_fp_ctrl_typeTagOut_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fp_ctrl_fromint_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fp_ctrl_toint_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fp_ctrl_fastpipe_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fp_ctrl_fma_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fp_ctrl_div_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fp_ctrl_sqrt_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fp_ctrl_wflags_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fp_ctrl_vec_0; // @[mshrs.scala:36:7]
wire [31:0] io_replay_bits_uop_inst_0; // @[mshrs.scala:36:7]
wire [31:0] io_replay_bits_uop_debug_inst_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_is_rvc_0; // @[mshrs.scala:36:7]
wire [33:0] io_replay_bits_uop_debug_pc_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_iw_issued_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_iw_issued_partial_agen_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_iw_issued_partial_dgen_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_iw_p1_speculative_child_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_iw_p2_speculative_child_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_iw_p1_bypass_hint_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_iw_p2_bypass_hint_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_iw_p3_bypass_hint_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_dis_col_sel_0; // @[mshrs.scala:36:7]
wire [3:0] io_replay_bits_uop_br_mask_0; // @[mshrs.scala:36:7]
wire [1:0] io_replay_bits_uop_br_tag_0; // @[mshrs.scala:36:7]
wire [3:0] io_replay_bits_uop_br_type_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_is_sfb_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_is_fence_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_is_fencei_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_is_sfence_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_is_amo_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_is_eret_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_is_sys_pc2epc_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_is_rocc_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_is_mov_0; // @[mshrs.scala:36:7]
wire [3:0] io_replay_bits_uop_ftq_idx_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_edge_inst_0; // @[mshrs.scala:36:7]
wire [5:0] io_replay_bits_uop_pc_lob_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_taken_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_imm_rename_0; // @[mshrs.scala:36:7]
wire [2:0] io_replay_bits_uop_imm_sel_0; // @[mshrs.scala:36:7]
wire [4:0] io_replay_bits_uop_pimm_0; // @[mshrs.scala:36:7]
wire [19:0] io_replay_bits_uop_imm_packed_0; // @[mshrs.scala:36:7]
wire [1:0] io_replay_bits_uop_op1_sel_0; // @[mshrs.scala:36:7]
wire [2:0] io_replay_bits_uop_op2_sel_0; // @[mshrs.scala:36:7]
wire [4:0] io_replay_bits_uop_rob_idx_0; // @[mshrs.scala:36:7]
wire [3:0] io_replay_bits_uop_ldq_idx_0; // @[mshrs.scala:36:7]
wire [3:0] io_replay_bits_uop_stq_idx_0; // @[mshrs.scala:36:7]
wire [1:0] io_replay_bits_uop_rxq_idx_0; // @[mshrs.scala:36:7]
wire [5:0] io_replay_bits_uop_pdst_0; // @[mshrs.scala:36:7]
wire [5:0] io_replay_bits_uop_prs1_0; // @[mshrs.scala:36:7]
wire [5:0] io_replay_bits_uop_prs2_0; // @[mshrs.scala:36:7]
wire [5:0] io_replay_bits_uop_prs3_0; // @[mshrs.scala:36:7]
wire [3:0] io_replay_bits_uop_ppred_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_prs1_busy_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_prs2_busy_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_prs3_busy_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_ppred_busy_0; // @[mshrs.scala:36:7]
wire [5:0] io_replay_bits_uop_stale_pdst_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_exception_0; // @[mshrs.scala:36:7]
wire [63:0] io_replay_bits_uop_exc_cause_0; // @[mshrs.scala:36:7]
wire [4:0] io_replay_bits_uop_mem_cmd_0; // @[mshrs.scala:36:7]
wire [1:0] io_replay_bits_uop_mem_size_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_mem_signed_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_uses_ldq_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_uses_stq_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_is_unique_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_flush_on_commit_0; // @[mshrs.scala:36:7]
wire [2:0] io_replay_bits_uop_csr_cmd_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_ldst_is_rs1_0; // @[mshrs.scala:36:7]
wire [5:0] io_replay_bits_uop_ldst_0; // @[mshrs.scala:36:7]
wire [5:0] io_replay_bits_uop_lrs1_0; // @[mshrs.scala:36:7]
wire [5:0] io_replay_bits_uop_lrs2_0; // @[mshrs.scala:36:7]
wire [5:0] io_replay_bits_uop_lrs3_0; // @[mshrs.scala:36:7]
wire [1:0] io_replay_bits_uop_dst_rtype_0; // @[mshrs.scala:36:7]
wire [1:0] io_replay_bits_uop_lrs1_rtype_0; // @[mshrs.scala:36:7]
wire [1:0] io_replay_bits_uop_lrs2_rtype_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_frs3_en_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fcn_dw_0; // @[mshrs.scala:36:7]
wire [4:0] io_replay_bits_uop_fcn_op_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_fp_val_0; // @[mshrs.scala:36:7]
wire [2:0] io_replay_bits_uop_fp_rm_0; // @[mshrs.scala:36:7]
wire [1:0] io_replay_bits_uop_fp_typ_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_xcpt_pf_if_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_xcpt_ae_if_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_xcpt_ma_if_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_bp_debug_if_0; // @[mshrs.scala:36:7]
wire io_replay_bits_uop_bp_xcpt_if_0; // @[mshrs.scala:36:7]
wire [2:0] io_replay_bits_uop_debug_fsrc_0; // @[mshrs.scala:36:7]
wire [2:0] io_replay_bits_uop_debug_tsrc_0; // @[mshrs.scala:36:7]
wire [1:0] io_replay_bits_old_meta_coh_state_0; // @[mshrs.scala:36:7]
wire [21:0] io_replay_bits_old_meta_tag_0; // @[mshrs.scala:36:7]
wire [33:0] io_replay_bits_addr_0; // @[mshrs.scala:36:7]
wire [63:0] io_replay_bits_data_0; // @[mshrs.scala:36:7]
wire io_replay_bits_is_hella_0; // @[mshrs.scala:36:7]
wire io_replay_bits_tag_match_0; // @[mshrs.scala:36:7]
wire [1:0] io_replay_bits_way_en_0; // @[mshrs.scala:36:7]
wire [4:0] io_replay_bits_sdq_id_0; // @[mshrs.scala:36:7]
wire io_replay_valid_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_iq_type_0_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_iq_type_1_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_iq_type_2_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_iq_type_3_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fu_code_0_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fu_code_1_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fu_code_2_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fu_code_3_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fu_code_4_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fu_code_5_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fu_code_6_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fu_code_7_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fu_code_8_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fu_code_9_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fp_ctrl_ldst_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fp_ctrl_wen_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fp_ctrl_ren1_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fp_ctrl_ren2_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fp_ctrl_ren3_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fp_ctrl_swap12_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fp_ctrl_swap23_0; // @[mshrs.scala:36:7]
wire [1:0] io_resp_bits_uop_fp_ctrl_typeTagIn_0; // @[mshrs.scala:36:7]
wire [1:0] io_resp_bits_uop_fp_ctrl_typeTagOut_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fp_ctrl_fromint_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fp_ctrl_toint_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fp_ctrl_fastpipe_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fp_ctrl_fma_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fp_ctrl_div_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fp_ctrl_sqrt_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fp_ctrl_wflags_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fp_ctrl_vec_0; // @[mshrs.scala:36:7]
wire [31:0] io_resp_bits_uop_inst_0; // @[mshrs.scala:36:7]
wire [31:0] io_resp_bits_uop_debug_inst_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_is_rvc_0; // @[mshrs.scala:36:7]
wire [33:0] io_resp_bits_uop_debug_pc_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_iw_issued_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_iw_issued_partial_agen_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_iw_issued_partial_dgen_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_iw_p1_speculative_child_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_iw_p2_speculative_child_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_iw_p1_bypass_hint_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_iw_p2_bypass_hint_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_iw_p3_bypass_hint_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_dis_col_sel_0; // @[mshrs.scala:36:7]
wire [3:0] io_resp_bits_uop_br_mask_0; // @[mshrs.scala:36:7]
wire [1:0] io_resp_bits_uop_br_tag_0; // @[mshrs.scala:36:7]
wire [3:0] io_resp_bits_uop_br_type_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_is_sfb_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_is_fence_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_is_fencei_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_is_sfence_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_is_amo_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_is_eret_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_is_sys_pc2epc_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_is_rocc_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_is_mov_0; // @[mshrs.scala:36:7]
wire [3:0] io_resp_bits_uop_ftq_idx_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_edge_inst_0; // @[mshrs.scala:36:7]
wire [5:0] io_resp_bits_uop_pc_lob_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_taken_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_imm_rename_0; // @[mshrs.scala:36:7]
wire [2:0] io_resp_bits_uop_imm_sel_0; // @[mshrs.scala:36:7]
wire [4:0] io_resp_bits_uop_pimm_0; // @[mshrs.scala:36:7]
wire [19:0] io_resp_bits_uop_imm_packed_0; // @[mshrs.scala:36:7]
wire [1:0] io_resp_bits_uop_op1_sel_0; // @[mshrs.scala:36:7]
wire [2:0] io_resp_bits_uop_op2_sel_0; // @[mshrs.scala:36:7]
wire [4:0] io_resp_bits_uop_rob_idx_0; // @[mshrs.scala:36:7]
wire [3:0] io_resp_bits_uop_ldq_idx_0; // @[mshrs.scala:36:7]
wire [3:0] io_resp_bits_uop_stq_idx_0; // @[mshrs.scala:36:7]
wire [1:0] io_resp_bits_uop_rxq_idx_0; // @[mshrs.scala:36:7]
wire [5:0] io_resp_bits_uop_pdst_0; // @[mshrs.scala:36:7]
wire [5:0] io_resp_bits_uop_prs1_0; // @[mshrs.scala:36:7]
wire [5:0] io_resp_bits_uop_prs2_0; // @[mshrs.scala:36:7]
wire [5:0] io_resp_bits_uop_prs3_0; // @[mshrs.scala:36:7]
wire [3:0] io_resp_bits_uop_ppred_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_prs1_busy_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_prs2_busy_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_prs3_busy_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_ppred_busy_0; // @[mshrs.scala:36:7]
wire [5:0] io_resp_bits_uop_stale_pdst_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_exception_0; // @[mshrs.scala:36:7]
wire [63:0] io_resp_bits_uop_exc_cause_0; // @[mshrs.scala:36:7]
wire [4:0] io_resp_bits_uop_mem_cmd_0; // @[mshrs.scala:36:7]
wire [1:0] io_resp_bits_uop_mem_size_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_mem_signed_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_uses_ldq_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_uses_stq_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_is_unique_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_flush_on_commit_0; // @[mshrs.scala:36:7]
wire [2:0] io_resp_bits_uop_csr_cmd_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_ldst_is_rs1_0; // @[mshrs.scala:36:7]
wire [5:0] io_resp_bits_uop_ldst_0; // @[mshrs.scala:36:7]
wire [5:0] io_resp_bits_uop_lrs1_0; // @[mshrs.scala:36:7]
wire [5:0] io_resp_bits_uop_lrs2_0; // @[mshrs.scala:36:7]
wire [5:0] io_resp_bits_uop_lrs3_0; // @[mshrs.scala:36:7]
wire [1:0] io_resp_bits_uop_dst_rtype_0; // @[mshrs.scala:36:7]
wire [1:0] io_resp_bits_uop_lrs1_rtype_0; // @[mshrs.scala:36:7]
wire [1:0] io_resp_bits_uop_lrs2_rtype_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_frs3_en_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fcn_dw_0; // @[mshrs.scala:36:7]
wire [4:0] io_resp_bits_uop_fcn_op_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_fp_val_0; // @[mshrs.scala:36:7]
wire [2:0] io_resp_bits_uop_fp_rm_0; // @[mshrs.scala:36:7]
wire [1:0] io_resp_bits_uop_fp_typ_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_xcpt_pf_if_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_xcpt_ae_if_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_xcpt_ma_if_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_bp_debug_if_0; // @[mshrs.scala:36:7]
wire io_resp_bits_uop_bp_xcpt_if_0; // @[mshrs.scala:36:7]
wire [2:0] io_resp_bits_uop_debug_fsrc_0; // @[mshrs.scala:36:7]
wire [2:0] io_resp_bits_uop_debug_tsrc_0; // @[mshrs.scala:36:7]
wire [63:0] io_resp_bits_data_0; // @[mshrs.scala:36:7]
wire io_resp_bits_is_hella_0; // @[mshrs.scala:36:7]
wire io_resp_valid_0; // @[mshrs.scala:36:7]
wire io_req_pri_rdy_0; // @[mshrs.scala:36:7]
wire io_req_sec_rdy_0; // @[mshrs.scala:36:7]
wire io_commit_val_0; // @[mshrs.scala:36:7]
wire [33:0] io_commit_addr_0; // @[mshrs.scala:36:7]
wire io_probe_rdy_0; // @[mshrs.scala:36:7]
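  // MSHR state-machine register and registers that latch the in-flight request
  // (uop fields, address, store data, victim metadata, way enable, and sdq id).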
reg [4:0] state; // @[mshrs.scala:107:22]
reg [31:0] req_uop_inst; // @[mshrs.scala:109:20]
reg [31:0] req_uop_debug_inst; // @[mshrs.scala:109:20]
reg req_uop_is_rvc; // @[mshrs.scala:109:20]
reg [33:0] req_uop_debug_pc; // @[mshrs.scala:109:20]
reg req_uop_iq_type_0; // @[mshrs.scala:109:20]
reg req_uop_iq_type_1; // @[mshrs.scala:109:20]
reg req_uop_iq_type_2; // @[mshrs.scala:109:20]
reg req_uop_iq_type_3; // @[mshrs.scala:109:20]
reg req_uop_fu_code_0; // @[mshrs.scala:109:20]
reg req_uop_fu_code_1; // @[mshrs.scala:109:20]
reg req_uop_fu_code_2; // @[mshrs.scala:109:20]
reg req_uop_fu_code_3; // @[mshrs.scala:109:20]
reg req_uop_fu_code_4; // @[mshrs.scala:109:20]
reg req_uop_fu_code_5; // @[mshrs.scala:109:20]
reg req_uop_fu_code_6; // @[mshrs.scala:109:20]
reg req_uop_fu_code_7; // @[mshrs.scala:109:20]
reg req_uop_fu_code_8; // @[mshrs.scala:109:20]
reg req_uop_fu_code_9; // @[mshrs.scala:109:20]
reg req_uop_iw_issued; // @[mshrs.scala:109:20]
reg req_uop_iw_issued_partial_agen; // @[mshrs.scala:109:20]
reg req_uop_iw_issued_partial_dgen; // @[mshrs.scala:109:20]
reg req_uop_iw_p1_speculative_child; // @[mshrs.scala:109:20]
reg req_uop_iw_p2_speculative_child; // @[mshrs.scala:109:20]
reg req_uop_iw_p1_bypass_hint; // @[mshrs.scala:109:20]
reg req_uop_iw_p2_bypass_hint; // @[mshrs.scala:109:20]
reg req_uop_iw_p3_bypass_hint; // @[mshrs.scala:109:20]
reg req_uop_dis_col_sel; // @[mshrs.scala:109:20]
reg [3:0] req_uop_br_mask; // @[mshrs.scala:109:20]
reg [1:0] req_uop_br_tag; // @[mshrs.scala:109:20]
reg [3:0] req_uop_br_type; // @[mshrs.scala:109:20]
reg req_uop_is_sfb; // @[mshrs.scala:109:20]
reg req_uop_is_fence; // @[mshrs.scala:109:20]
reg req_uop_is_fencei; // @[mshrs.scala:109:20]
reg req_uop_is_sfence; // @[mshrs.scala:109:20]
reg req_uop_is_amo; // @[mshrs.scala:109:20]
reg req_uop_is_eret; // @[mshrs.scala:109:20]
reg req_uop_is_sys_pc2epc; // @[mshrs.scala:109:20]
reg req_uop_is_rocc; // @[mshrs.scala:109:20]
reg req_uop_is_mov; // @[mshrs.scala:109:20]
reg [3:0] req_uop_ftq_idx; // @[mshrs.scala:109:20]
reg req_uop_edge_inst; // @[mshrs.scala:109:20]
reg [5:0] req_uop_pc_lob; // @[mshrs.scala:109:20]
reg req_uop_taken; // @[mshrs.scala:109:20]
reg req_uop_imm_rename; // @[mshrs.scala:109:20]
reg [2:0] req_uop_imm_sel; // @[mshrs.scala:109:20]
reg [4:0] req_uop_pimm; // @[mshrs.scala:109:20]
reg [19:0] req_uop_imm_packed; // @[mshrs.scala:109:20]
reg [1:0] req_uop_op1_sel; // @[mshrs.scala:109:20]
reg [2:0] req_uop_op2_sel; // @[mshrs.scala:109:20]
reg req_uop_fp_ctrl_ldst; // @[mshrs.scala:109:20]
reg req_uop_fp_ctrl_wen; // @[mshrs.scala:109:20]
reg req_uop_fp_ctrl_ren1; // @[mshrs.scala:109:20]
reg req_uop_fp_ctrl_ren2; // @[mshrs.scala:109:20]
reg req_uop_fp_ctrl_ren3; // @[mshrs.scala:109:20]
reg req_uop_fp_ctrl_swap12; // @[mshrs.scala:109:20]
reg req_uop_fp_ctrl_swap23; // @[mshrs.scala:109:20]
reg [1:0] req_uop_fp_ctrl_typeTagIn; // @[mshrs.scala:109:20]
reg [1:0] req_uop_fp_ctrl_typeTagOut; // @[mshrs.scala:109:20]
reg req_uop_fp_ctrl_fromint; // @[mshrs.scala:109:20]
reg req_uop_fp_ctrl_toint; // @[mshrs.scala:109:20]
reg req_uop_fp_ctrl_fastpipe; // @[mshrs.scala:109:20]
reg req_uop_fp_ctrl_fma; // @[mshrs.scala:109:20]
reg req_uop_fp_ctrl_div; // @[mshrs.scala:109:20]
reg req_uop_fp_ctrl_sqrt; // @[mshrs.scala:109:20]
reg req_uop_fp_ctrl_wflags; // @[mshrs.scala:109:20]
reg req_uop_fp_ctrl_vec; // @[mshrs.scala:109:20]
reg [4:0] req_uop_rob_idx; // @[mshrs.scala:109:20]
reg [3:0] req_uop_ldq_idx; // @[mshrs.scala:109:20]
reg [3:0] req_uop_stq_idx; // @[mshrs.scala:109:20]
reg [1:0] req_uop_rxq_idx; // @[mshrs.scala:109:20]
reg [5:0] req_uop_pdst; // @[mshrs.scala:109:20]
reg [5:0] req_uop_prs1; // @[mshrs.scala:109:20]
reg [5:0] req_uop_prs2; // @[mshrs.scala:109:20]
reg [5:0] req_uop_prs3; // @[mshrs.scala:109:20]
reg [3:0] req_uop_ppred; // @[mshrs.scala:109:20]
reg req_uop_prs1_busy; // @[mshrs.scala:109:20]
reg req_uop_prs2_busy; // @[mshrs.scala:109:20]
reg req_uop_prs3_busy; // @[mshrs.scala:109:20]
reg req_uop_ppred_busy; // @[mshrs.scala:109:20]
reg [5:0] req_uop_stale_pdst; // @[mshrs.scala:109:20]
reg req_uop_exception; // @[mshrs.scala:109:20]
reg [63:0] req_uop_exc_cause; // @[mshrs.scala:109:20]
reg [4:0] req_uop_mem_cmd; // @[mshrs.scala:109:20]
reg [1:0] req_uop_mem_size; // @[mshrs.scala:109:20]
reg req_uop_mem_signed; // @[mshrs.scala:109:20]
reg req_uop_uses_ldq; // @[mshrs.scala:109:20]
reg req_uop_uses_stq; // @[mshrs.scala:109:20]
reg req_uop_is_unique; // @[mshrs.scala:109:20]
reg req_uop_flush_on_commit; // @[mshrs.scala:109:20]
reg [2:0] req_uop_csr_cmd; // @[mshrs.scala:109:20]
reg req_uop_ldst_is_rs1; // @[mshrs.scala:109:20]
reg [5:0] req_uop_ldst; // @[mshrs.scala:109:20]
reg [5:0] req_uop_lrs1; // @[mshrs.scala:109:20]
reg [5:0] req_uop_lrs2; // @[mshrs.scala:109:20]
reg [5:0] req_uop_lrs3; // @[mshrs.scala:109:20]
reg [1:0] req_uop_dst_rtype; // @[mshrs.scala:109:20]
reg [1:0] req_uop_lrs1_rtype; // @[mshrs.scala:109:20]
reg [1:0] req_uop_lrs2_rtype; // @[mshrs.scala:109:20]
reg req_uop_frs3_en; // @[mshrs.scala:109:20]
reg req_uop_fcn_dw; // @[mshrs.scala:109:20]
reg [4:0] req_uop_fcn_op; // @[mshrs.scala:109:20]
reg req_uop_fp_val; // @[mshrs.scala:109:20]
reg [2:0] req_uop_fp_rm; // @[mshrs.scala:109:20]
reg [1:0] req_uop_fp_typ; // @[mshrs.scala:109:20]
reg req_uop_xcpt_pf_if; // @[mshrs.scala:109:20]
reg req_uop_xcpt_ae_if; // @[mshrs.scala:109:20]
reg req_uop_xcpt_ma_if; // @[mshrs.scala:109:20]
reg req_uop_bp_debug_if; // @[mshrs.scala:109:20]
reg req_uop_bp_xcpt_if; // @[mshrs.scala:109:20]
reg [2:0] req_uop_debug_fsrc; // @[mshrs.scala:109:20]
reg [2:0] req_uop_debug_tsrc; // @[mshrs.scala:109:20]
reg [33:0] req_addr; // @[mshrs.scala:109:20]
assign io_commit_addr_0 = req_addr; // @[mshrs.scala:36:7, :109:20]
reg [63:0] req_data; // @[mshrs.scala:109:20]
reg req_is_hella; // @[mshrs.scala:109:20]
reg req_tag_match; // @[mshrs.scala:109:20]
reg [1:0] req_old_meta_coh_state; // @[mshrs.scala:109:20]
reg [21:0] req_old_meta_tag; // @[mshrs.scala:109:20]
assign io_wb_req_bits_tag_0 = req_old_meta_tag; // @[mshrs.scala:36:7, :109:20]
reg [1:0] req_way_en; // @[mshrs.scala:109:20]
assign io_way_bits_0 = req_way_en; // @[mshrs.scala:36:7, :109:20]
assign io_refill_bits_way_en_0 = req_way_en; // @[mshrs.scala:36:7, :109:20]
assign io_meta_write_bits_way_en_0 = req_way_en; // @[mshrs.scala:36:7, :109:20]
assign io_meta_read_bits_way_en_0 = req_way_en; // @[mshrs.scala:36:7, :109:20]
assign io_wb_req_bits_way_en_0 = req_way_en; // @[mshrs.scala:36:7, :109:20]
reg [4:0] req_sdq_id; // @[mshrs.scala:109:20]
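  // Cache index, tag, and block-aligned address derived from the latched request
  // address; these also feed the meta/wb request fields and the io_idx/io_tag outputs.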
assign req_idx = req_addr[9:6]; // @[mshrs.scala:109:20, :110:25]
assign io_idx_bits_0 = req_idx; // @[mshrs.scala:36:7, :110:25]
assign io_meta_write_bits_idx_0 = req_idx; // @[mshrs.scala:36:7, :110:25]
assign io_meta_read_bits_idx_0 = req_idx; // @[mshrs.scala:36:7, :110:25]
assign io_wb_req_bits_idx_0 = req_idx; // @[mshrs.scala:36:7, :110:25]
assign req_tag = req_addr[33:10]; // @[mshrs.scala:109:20, :111:26]
assign io_tag_bits_0 = req_tag; // @[mshrs.scala:36:7, :111:26]
wire [27:0] _req_block_addr_T = req_addr[33:6]; // @[mshrs.scala:109:20, :112:34]
wire [33:0] req_block_addr = {_req_block_addr_T, 6'h0}; // @[mshrs.scala:112:{34,51}]
reg req_needs_wb; // @[mshrs.scala:113:29]
reg [1:0] new_coh_state; // @[mshrs.scala:115:24]
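  // Shrink-parameter lookup (Metadata/Misc tables) on the victim's old coherence
  // state: shrink_param drives io_wb_req_bits_param, and coh_on_clear_state is the
  // resulting cleared coherence state.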
wire [3:0] _r_T_6 = {2'h2, req_old_meta_coh_state}; // @[Metadata.scala:120:19]
wire _r_T_19 = _r_T_6 == 4'h8; // @[Misc.scala:56:20]
wire [2:0] _r_T_21 = _r_T_19 ? 3'h5 : 3'h0; // @[Misc.scala:38:36, :56:20]
wire _r_T_23 = _r_T_6 == 4'h9; // @[Misc.scala:56:20]
wire [2:0] _r_T_25 = _r_T_23 ? 3'h2 : _r_T_21; // @[Misc.scala:38:36, :56:20]
wire _r_T_27 = _r_T_6 == 4'hA; // @[Misc.scala:56:20]
wire [2:0] _r_T_29 = _r_T_27 ? 3'h1 : _r_T_25; // @[Misc.scala:38:36, :56:20]
wire _r_T_31 = _r_T_6 == 4'hB; // @[Misc.scala:56:20]
wire _r_T_32 = _r_T_31; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_33 = _r_T_31 ? 3'h1 : _r_T_29; // @[Misc.scala:38:36, :56:20]
wire _r_T_35 = _r_T_6 == 4'h4; // @[Misc.scala:56:20]
wire _r_T_36 = ~_r_T_35 & _r_T_32; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_37 = _r_T_35 ? 3'h5 : _r_T_33; // @[Misc.scala:38:36, :56:20]
wire _r_T_39 = _r_T_6 == 4'h5; // @[Misc.scala:56:20]
wire _r_T_40 = ~_r_T_39 & _r_T_36; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_41 = _r_T_39 ? 3'h4 : _r_T_37; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_42 = {1'h0, _r_T_39}; // @[Misc.scala:38:63, :56:20]
wire _r_T_43 = _r_T_6 == 4'h6; // @[Misc.scala:56:20]
wire _r_T_44 = ~_r_T_43 & _r_T_40; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_45 = _r_T_43 ? 3'h0 : _r_T_41; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_46 = _r_T_43 ? 2'h1 : _r_T_42; // @[Misc.scala:38:63, :56:20]
wire _r_T_47 = _r_T_6 == 4'h7; // @[Misc.scala:56:20]
wire _r_T_48 = _r_T_47 | _r_T_44; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_49 = _r_T_47 ? 3'h0 : _r_T_45; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_50 = _r_T_47 ? 2'h1 : _r_T_46; // @[Misc.scala:38:63, :56:20]
wire _r_T_51 = _r_T_6 == 4'h0; // @[Misc.scala:56:20]
wire _r_T_52 = ~_r_T_51 & _r_T_48; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_53 = _r_T_51 ? 3'h5 : _r_T_49; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_54 = _r_T_51 ? 2'h0 : _r_T_50; // @[Misc.scala:38:63, :56:20]
wire _r_T_55 = _r_T_6 == 4'h1; // @[Misc.scala:56:20]
wire _r_T_56 = ~_r_T_55 & _r_T_52; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_57 = _r_T_55 ? 3'h4 : _r_T_53; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_58 = _r_T_55 ? 2'h1 : _r_T_54; // @[Misc.scala:38:63, :56:20]
wire _r_T_59 = _r_T_6 == 4'h2; // @[Misc.scala:56:20]
wire _r_T_60 = ~_r_T_59 & _r_T_56; // @[Misc.scala:38:9, :56:20]
wire [2:0] _r_T_61 = _r_T_59 ? 3'h3 : _r_T_57; // @[Misc.scala:38:36, :56:20]
wire [1:0] _r_T_62 = _r_T_59 ? 2'h2 : _r_T_58; // @[Misc.scala:38:63, :56:20]
wire _r_T_63 = _r_T_6 == 4'h3; // @[Misc.scala:56:20]
wire r_1 = _r_T_63 | _r_T_60; // @[Misc.scala:38:9, :56:20]
assign shrink_param = _r_T_63 ? 3'h3 : _r_T_61; // @[Misc.scala:38:36, :56:20]
assign io_wb_req_bits_param_0 = shrink_param; // @[Misc.scala:38:36]
wire [1:0] r_3 = _r_T_63 ? 2'h2 : _r_T_62; // @[Misc.scala:38:63, :56:20]
wire [1:0] coh_on_clear_state = r_3; // @[Misc.scala:38:63]
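  // Decode of the latched memory command (req_uop_mem_cmd) against the command
  // classes from Consts/package; the shared _GEN* terms fan out to the grow_param,
  // coh_on_grant, r1, and needs_second_acq expressions below.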
wire _GEN = req_uop_mem_cmd == 5'h1; // @[Consts.scala:90:32]
wire _grow_param_r_c_cat_T; // @[Consts.scala:90:32]
assign _grow_param_r_c_cat_T = _GEN; // @[Consts.scala:90:32]
wire _grow_param_r_c_cat_T_23; // @[Consts.scala:90:32]
assign _grow_param_r_c_cat_T_23 = _GEN; // @[Consts.scala:90:32]
wire _coh_on_grant_c_cat_T; // @[Consts.scala:90:32]
assign _coh_on_grant_c_cat_T = _GEN; // @[Consts.scala:90:32]
wire _coh_on_grant_c_cat_T_23; // @[Consts.scala:90:32]
assign _coh_on_grant_c_cat_T_23 = _GEN; // @[Consts.scala:90:32]
wire _r1_c_cat_T; // @[Consts.scala:90:32]
assign _r1_c_cat_T = _GEN; // @[Consts.scala:90:32]
wire _r1_c_cat_T_23; // @[Consts.scala:90:32]
assign _r1_c_cat_T_23 = _GEN; // @[Consts.scala:90:32]
wire _needs_second_acq_T_27; // @[Consts.scala:90:32]
assign _needs_second_acq_T_27 = _GEN; // @[Consts.scala:90:32]
wire _GEN_0 = req_uop_mem_cmd == 5'h11; // @[Consts.scala:90:49]
wire _grow_param_r_c_cat_T_1; // @[Consts.scala:90:49]
assign _grow_param_r_c_cat_T_1 = _GEN_0; // @[Consts.scala:90:49]
wire _grow_param_r_c_cat_T_24; // @[Consts.scala:90:49]
assign _grow_param_r_c_cat_T_24 = _GEN_0; // @[Consts.scala:90:49]
wire _coh_on_grant_c_cat_T_1; // @[Consts.scala:90:49]
assign _coh_on_grant_c_cat_T_1 = _GEN_0; // @[Consts.scala:90:49]
wire _coh_on_grant_c_cat_T_24; // @[Consts.scala:90:49]
assign _coh_on_grant_c_cat_T_24 = _GEN_0; // @[Consts.scala:90:49]
wire _r1_c_cat_T_1; // @[Consts.scala:90:49]
assign _r1_c_cat_T_1 = _GEN_0; // @[Consts.scala:90:49]
wire _r1_c_cat_T_24; // @[Consts.scala:90:49]
assign _r1_c_cat_T_24 = _GEN_0; // @[Consts.scala:90:49]
wire _needs_second_acq_T_28; // @[Consts.scala:90:49]
assign _needs_second_acq_T_28 = _GEN_0; // @[Consts.scala:90:49]
wire _grow_param_r_c_cat_T_2 = _grow_param_r_c_cat_T | _grow_param_r_c_cat_T_1; // @[Consts.scala:90:{32,42,49}]
wire _GEN_1 = req_uop_mem_cmd == 5'h7; // @[Consts.scala:90:66]
wire _grow_param_r_c_cat_T_3; // @[Consts.scala:90:66]
assign _grow_param_r_c_cat_T_3 = _GEN_1; // @[Consts.scala:90:66]
wire _grow_param_r_c_cat_T_26; // @[Consts.scala:90:66]
assign _grow_param_r_c_cat_T_26 = _GEN_1; // @[Consts.scala:90:66]
wire _coh_on_grant_c_cat_T_3; // @[Consts.scala:90:66]
assign _coh_on_grant_c_cat_T_3 = _GEN_1; // @[Consts.scala:90:66]
wire _coh_on_grant_c_cat_T_26; // @[Consts.scala:90:66]
assign _coh_on_grant_c_cat_T_26 = _GEN_1; // @[Consts.scala:90:66]
wire _r1_c_cat_T_3; // @[Consts.scala:90:66]
assign _r1_c_cat_T_3 = _GEN_1; // @[Consts.scala:90:66]
wire _r1_c_cat_T_26; // @[Consts.scala:90:66]
assign _r1_c_cat_T_26 = _GEN_1; // @[Consts.scala:90:66]
wire _needs_second_acq_T_30; // @[Consts.scala:90:66]
assign _needs_second_acq_T_30 = _GEN_1; // @[Consts.scala:90:66]
wire _grow_param_r_c_cat_T_4 = _grow_param_r_c_cat_T_2 | _grow_param_r_c_cat_T_3; // @[Consts.scala:90:{42,59,66}]
wire _GEN_2 = req_uop_mem_cmd == 5'h4; // @[package.scala:16:47]
wire _grow_param_r_c_cat_T_5; // @[package.scala:16:47]
assign _grow_param_r_c_cat_T_5 = _GEN_2; // @[package.scala:16:47]
wire _grow_param_r_c_cat_T_28; // @[package.scala:16:47]
assign _grow_param_r_c_cat_T_28 = _GEN_2; // @[package.scala:16:47]
wire _coh_on_grant_c_cat_T_5; // @[package.scala:16:47]
assign _coh_on_grant_c_cat_T_5 = _GEN_2; // @[package.scala:16:47]
wire _coh_on_grant_c_cat_T_28; // @[package.scala:16:47]
assign _coh_on_grant_c_cat_T_28 = _GEN_2; // @[package.scala:16:47]
wire _r1_c_cat_T_5; // @[package.scala:16:47]
assign _r1_c_cat_T_5 = _GEN_2; // @[package.scala:16:47]
wire _r1_c_cat_T_28; // @[package.scala:16:47]
assign _r1_c_cat_T_28 = _GEN_2; // @[package.scala:16:47]
wire _needs_second_acq_T_32; // @[package.scala:16:47]
assign _needs_second_acq_T_32 = _GEN_2; // @[package.scala:16:47]
wire _GEN_3 = req_uop_mem_cmd == 5'h9; // @[package.scala:16:47]
wire _grow_param_r_c_cat_T_6; // @[package.scala:16:47]
assign _grow_param_r_c_cat_T_6 = _GEN_3; // @[package.scala:16:47]
wire _grow_param_r_c_cat_T_29; // @[package.scala:16:47]
assign _grow_param_r_c_cat_T_29 = _GEN_3; // @[package.scala:16:47]
wire _coh_on_grant_c_cat_T_6; // @[package.scala:16:47]
assign _coh_on_grant_c_cat_T_6 = _GEN_3; // @[package.scala:16:47]
wire _coh_on_grant_c_cat_T_29; // @[package.scala:16:47]
assign _coh_on_grant_c_cat_T_29 = _GEN_3; // @[package.scala:16:47]
wire _r1_c_cat_T_6; // @[package.scala:16:47]
assign _r1_c_cat_T_6 = _GEN_3; // @[package.scala:16:47]
wire _r1_c_cat_T_29; // @[package.scala:16:47]
assign _r1_c_cat_T_29 = _GEN_3; // @[package.scala:16:47]
wire _needs_second_acq_T_33; // @[package.scala:16:47]
assign _needs_second_acq_T_33 = _GEN_3; // @[package.scala:16:47]
wire _GEN_4 = req_uop_mem_cmd == 5'hA; // @[package.scala:16:47]
wire _grow_param_r_c_cat_T_7; // @[package.scala:16:47]
assign _grow_param_r_c_cat_T_7 = _GEN_4; // @[package.scala:16:47]
wire _grow_param_r_c_cat_T_30; // @[package.scala:16:47]
assign _grow_param_r_c_cat_T_30 = _GEN_4; // @[package.scala:16:47]
wire _coh_on_grant_c_cat_T_7; // @[package.scala:16:47]
assign _coh_on_grant_c_cat_T_7 = _GEN_4; // @[package.scala:16:47]
wire _coh_on_grant_c_cat_T_30; // @[package.scala:16:47]
assign _coh_on_grant_c_cat_T_30 = _GEN_4; // @[package.scala:16:47]
wire _r1_c_cat_T_7; // @[package.scala:16:47]
assign _r1_c_cat_T_7 = _GEN_4; // @[package.scala:16:47]
wire _r1_c_cat_T_30; // @[package.scala:16:47]
assign _r1_c_cat_T_30 = _GEN_4; // @[package.scala:16:47]
wire _needs_second_acq_T_34; // @[package.scala:16:47]
assign _needs_second_acq_T_34 = _GEN_4; // @[package.scala:16:47]
wire _GEN_5 = req_uop_mem_cmd == 5'hB; // @[package.scala:16:47]
wire _grow_param_r_c_cat_T_8; // @[package.scala:16:47]
assign _grow_param_r_c_cat_T_8 = _GEN_5; // @[package.scala:16:47]
wire _grow_param_r_c_cat_T_31; // @[package.scala:16:47]
assign _grow_param_r_c_cat_T_31 = _GEN_5; // @[package.scala:16:47]
wire _coh_on_grant_c_cat_T_8; // @[package.scala:16:47]
assign _coh_on_grant_c_cat_T_8 = _GEN_5; // @[package.scala:16:47]
wire _coh_on_grant_c_cat_T_31; // @[package.scala:16:47]
assign _coh_on_grant_c_cat_T_31 = _GEN_5; // @[package.scala:16:47]
wire _r1_c_cat_T_8; // @[package.scala:16:47]
assign _r1_c_cat_T_8 = _GEN_5; // @[package.scala:16:47]
wire _r1_c_cat_T_31; // @[package.scala:16:47]
assign _r1_c_cat_T_31 = _GEN_5; // @[package.scala:16:47]
wire _needs_second_acq_T_35; // @[package.scala:16:47]
assign _needs_second_acq_T_35 = _GEN_5; // @[package.scala:16:47]
wire _grow_param_r_c_cat_T_9 = _grow_param_r_c_cat_T_5 | _grow_param_r_c_cat_T_6; // @[package.scala:16:47, :81:59]
wire _grow_param_r_c_cat_T_10 = _grow_param_r_c_cat_T_9 | _grow_param_r_c_cat_T_7; // @[package.scala:16:47, :81:59]
wire _grow_param_r_c_cat_T_11 = _grow_param_r_c_cat_T_10 | _grow_param_r_c_cat_T_8; // @[package.scala:16:47, :81:59]
wire _GEN_6 = req_uop_mem_cmd == 5'h8; // @[package.scala:16:47]
wire _grow_param_r_c_cat_T_12; // @[package.scala:16:47]
assign _grow_param_r_c_cat_T_12 = _GEN_6; // @[package.scala:16:47]
wire _grow_param_r_c_cat_T_35; // @[package.scala:16:47]
assign _grow_param_r_c_cat_T_35 = _GEN_6; // @[package.scala:16:47]
wire _coh_on_grant_c_cat_T_12; // @[package.scala:16:47]
assign _coh_on_grant_c_cat_T_12 = _GEN_6; // @[package.scala:16:47]
wire _coh_on_grant_c_cat_T_35; // @[package.scala:16:47]
assign _coh_on_grant_c_cat_T_35 = _GEN_6; // @[package.scala:16:47]
wire _r1_c_cat_T_12; // @[package.scala:16:47]
assign _r1_c_cat_T_12 = _GEN_6; // @[package.scala:16:47]
wire _r1_c_cat_T_35; // @[package.scala:16:47]
assign _r1_c_cat_T_35 = _GEN_6; // @[package.scala:16:47]
wire _needs_second_acq_T_39; // @[package.scala:16:47]
assign _needs_second_acq_T_39 = _GEN_6; // @[package.scala:16:47]
wire _GEN_7 = req_uop_mem_cmd == 5'hC; // @[package.scala:16:47]
wire _grow_param_r_c_cat_T_13; // @[package.scala:16:47]
assign _grow_param_r_c_cat_T_13 = _GEN_7; // @[package.scala:16:47]
wire _grow_param_r_c_cat_T_36; // @[package.scala:16:47]
assign _grow_param_r_c_cat_T_36 = _GEN_7; // @[package.scala:16:47]
wire _coh_on_grant_c_cat_T_13; // @[package.scala:16:47]
assign _coh_on_grant_c_cat_T_13 = _GEN_7; // @[package.scala:16:47]
wire _coh_on_grant_c_cat_T_36; // @[package.scala:16:47]
assign _coh_on_grant_c_cat_T_36 = _GEN_7; // @[package.scala:16:47]
wire _r1_c_cat_T_13; // @[package.scala:16:47]
assign _r1_c_cat_T_13 = _GEN_7; // @[package.scala:16:47]
wire _r1_c_cat_T_36; // @[package.scala:16:47]
assign _r1_c_cat_T_36 = _GEN_7; // @[package.scala:16:47]
wire _needs_second_acq_T_40; // @[package.scala:16:47]
assign _needs_second_acq_T_40 = _GEN_7; // @[package.scala:16:47]
wire _GEN_8 = req_uop_mem_cmd == 5'hD; // @[package.scala:16:47]
wire _grow_param_r_c_cat_T_14; // @[package.scala:16:47]
assign _grow_param_r_c_cat_T_14 = _GEN_8; // @[package.scala:16:47]
wire _grow_param_r_c_cat_T_37; // @[package.scala:16:47]
assign _grow_param_r_c_cat_T_37 = _GEN_8; // @[package.scala:16:47]
wire _coh_on_grant_c_cat_T_14; // @[package.scala:16:47]
assign _coh_on_grant_c_cat_T_14 = _GEN_8; // @[package.scala:16:47]
wire _coh_on_grant_c_cat_T_37; // @[package.scala:16:47]
assign _coh_on_grant_c_cat_T_37 = _GEN_8; // @[package.scala:16:47]
wire _r1_c_cat_T_14; // @[package.scala:16:47]
assign _r1_c_cat_T_14 = _GEN_8; // @[package.scala:16:47]
wire _r1_c_cat_T_37; // @[package.scala:16:47]
assign _r1_c_cat_T_37 = _GEN_8; // @[package.scala:16:47]
wire _needs_second_acq_T_41; // @[package.scala:16:47]
assign _needs_second_acq_T_41 = _GEN_8; // @[package.scala:16:47]
wire _GEN_9 = req_uop_mem_cmd == 5'hE; // @[package.scala:16:47]
wire _grow_param_r_c_cat_T_15; // @[package.scala:16:47]
assign _grow_param_r_c_cat_T_15 = _GEN_9; // @[package.scala:16:47]
wire _grow_param_r_c_cat_T_38; // @[package.scala:16:47]
assign _grow_param_r_c_cat_T_38 = _GEN_9; // @[package.scala:16:47]
wire _coh_on_grant_c_cat_T_15; // @[package.scala:16:47]
assign _coh_on_grant_c_cat_T_15 = _GEN_9; // @[package.scala:16:47]
wire _coh_on_grant_c_cat_T_38; // @[package.scala:16:47]
assign _coh_on_grant_c_cat_T_38 = _GEN_9; // @[package.scala:16:47]
wire _r1_c_cat_T_15; // @[package.scala:16:47]
assign _r1_c_cat_T_15 = _GEN_9; // @[package.scala:16:47]
wire _r1_c_cat_T_38; // @[package.scala:16:47]
assign _r1_c_cat_T_38 = _GEN_9; // @[package.scala:16:47]
wire _needs_second_acq_T_42; // @[package.scala:16:47]
assign _needs_second_acq_T_42 = _GEN_9; // @[package.scala:16:47]
wire _GEN_10 = req_uop_mem_cmd == 5'hF; // @[package.scala:16:47]
wire _grow_param_r_c_cat_T_16; // @[package.scala:16:47]
assign _grow_param_r_c_cat_T_16 = _GEN_10; // @[package.scala:16:47]
wire _grow_param_r_c_cat_T_39; // @[package.scala:16:47]
assign _grow_param_r_c_cat_T_39 = _GEN_10; // @[package.scala:16:47]
wire _coh_on_grant_c_cat_T_16; // @[package.scala:16:47]
assign _coh_on_grant_c_cat_T_16 = _GEN_10; // @[package.scala:16:47]
wire _coh_on_grant_c_cat_T_39; // @[package.scala:16:47]
assign _coh_on_grant_c_cat_T_39 = _GEN_10; // @[package.scala:16:47]
wire _r1_c_cat_T_16; // @[package.scala:16:47]
assign _r1_c_cat_T_16 = _GEN_10; // @[package.scala:16:47]
wire _r1_c_cat_T_39; // @[package.scala:16:47]
assign _r1_c_cat_T_39 = _GEN_10; // @[package.scala:16:47]
wire _needs_second_acq_T_43; // @[package.scala:16:47]
assign _needs_second_acq_T_43 = _GEN_10; // @[package.scala:16:47]
wire _grow_param_r_c_cat_T_17 = _grow_param_r_c_cat_T_12 | _grow_param_r_c_cat_T_13; // @[package.scala:16:47, :81:59]
wire _grow_param_r_c_cat_T_18 = _grow_param_r_c_cat_T_17 | _grow_param_r_c_cat_T_14; // @[package.scala:16:47, :81:59]
wire _grow_param_r_c_cat_T_19 = _grow_param_r_c_cat_T_18 | _grow_param_r_c_cat_T_15; // @[package.scala:16:47, :81:59]
wire _grow_param_r_c_cat_T_20 = _grow_param_r_c_cat_T_19 | _grow_param_r_c_cat_T_16; // @[package.scala:16:47, :81:59]
wire _grow_param_r_c_cat_T_21 = _grow_param_r_c_cat_T_11 | _grow_param_r_c_cat_T_20; // @[package.scala:81:59]
wire _grow_param_r_c_cat_T_22 = _grow_param_r_c_cat_T_4 | _grow_param_r_c_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}]
wire _grow_param_r_c_cat_T_25 = _grow_param_r_c_cat_T_23 | _grow_param_r_c_cat_T_24; // @[Consts.scala:90:{32,42,49}]
wire _grow_param_r_c_cat_T_27 = _grow_param_r_c_cat_T_25 | _grow_param_r_c_cat_T_26; // @[Consts.scala:90:{42,59,66}]
wire _grow_param_r_c_cat_T_32 = _grow_param_r_c_cat_T_28 | _grow_param_r_c_cat_T_29; // @[package.scala:16:47, :81:59]
wire _grow_param_r_c_cat_T_33 = _grow_param_r_c_cat_T_32 | _grow_param_r_c_cat_T_30; // @[package.scala:16:47, :81:59]
wire _grow_param_r_c_cat_T_34 = _grow_param_r_c_cat_T_33 | _grow_param_r_c_cat_T_31; // @[package.scala:16:47, :81:59]
wire _grow_param_r_c_cat_T_40 = _grow_param_r_c_cat_T_35 | _grow_param_r_c_cat_T_36; // @[package.scala:16:47, :81:59]
wire _grow_param_r_c_cat_T_41 = _grow_param_r_c_cat_T_40 | _grow_param_r_c_cat_T_37; // @[package.scala:16:47, :81:59]
wire _grow_param_r_c_cat_T_42 = _grow_param_r_c_cat_T_41 | _grow_param_r_c_cat_T_38; // @[package.scala:16:47, :81:59]
wire _grow_param_r_c_cat_T_43 = _grow_param_r_c_cat_T_42 | _grow_param_r_c_cat_T_39; // @[package.scala:16:47, :81:59]
wire _grow_param_r_c_cat_T_44 = _grow_param_r_c_cat_T_34 | _grow_param_r_c_cat_T_43; // @[package.scala:81:59]
wire _grow_param_r_c_cat_T_45 = _grow_param_r_c_cat_T_27 | _grow_param_r_c_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}]
wire _GEN_11 = req_uop_mem_cmd == 5'h3; // @[Consts.scala:91:54]
wire _grow_param_r_c_cat_T_46; // @[Consts.scala:91:54]
assign _grow_param_r_c_cat_T_46 = _GEN_11; // @[Consts.scala:91:54]
wire _coh_on_grant_c_cat_T_46; // @[Consts.scala:91:54]
assign _coh_on_grant_c_cat_T_46 = _GEN_11; // @[Consts.scala:91:54]
wire _r1_c_cat_T_46; // @[Consts.scala:91:54]
assign _r1_c_cat_T_46 = _GEN_11; // @[Consts.scala:91:54]
wire _needs_second_acq_T_50; // @[Consts.scala:91:54]
assign _needs_second_acq_T_50 = _GEN_11; // @[Consts.scala:91:54]
wire _grow_param_r_c_cat_T_47 = _grow_param_r_c_cat_T_45 | _grow_param_r_c_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}]
wire _GEN_12 = req_uop_mem_cmd == 5'h6; // @[Consts.scala:91:71]
wire _grow_param_r_c_cat_T_48; // @[Consts.scala:91:71]
assign _grow_param_r_c_cat_T_48 = _GEN_12; // @[Consts.scala:91:71]
wire _coh_on_grant_c_cat_T_48; // @[Consts.scala:91:71]
assign _coh_on_grant_c_cat_T_48 = _GEN_12; // @[Consts.scala:91:71]
wire _r1_c_cat_T_48; // @[Consts.scala:91:71]
assign _r1_c_cat_T_48 = _GEN_12; // @[Consts.scala:91:71]
wire _needs_second_acq_T_52; // @[Consts.scala:91:71]
assign _needs_second_acq_T_52 = _GEN_12; // @[Consts.scala:91:71]
wire _grow_param_r_c_cat_T_49 = _grow_param_r_c_cat_T_47 | _grow_param_r_c_cat_T_48; // @[Consts.scala:91:{47,64,71}]
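  // grow_param: permission value looked up from {command class, new_coh_state} via
  // the Metadata/Misc tables; most likely the parameter requested on the outgoing
  // Acquire (grow_param_meta_state mirrors it).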
wire [1:0] grow_param_r_c = {_grow_param_r_c_cat_T_22, _grow_param_r_c_cat_T_49}; // @[Metadata.scala:29:18]
wire [3:0] _grow_param_r_T = {grow_param_r_c, new_coh_state}; // @[Metadata.scala:29:18, :58:19]
wire _grow_param_r_T_25 = _grow_param_r_T == 4'hC; // @[Misc.scala:49:20]
wire [1:0] _grow_param_r_T_27 = {1'h0, _grow_param_r_T_25}; // @[Misc.scala:35:36, :49:20]
wire _grow_param_r_T_28 = _grow_param_r_T == 4'hD; // @[Misc.scala:49:20]
wire [1:0] _grow_param_r_T_30 = _grow_param_r_T_28 ? 2'h2 : _grow_param_r_T_27; // @[Misc.scala:35:36, :49:20]
wire _grow_param_r_T_31 = _grow_param_r_T == 4'h4; // @[Misc.scala:49:20]
wire [1:0] _grow_param_r_T_33 = _grow_param_r_T_31 ? 2'h1 : _grow_param_r_T_30; // @[Misc.scala:35:36, :49:20]
wire _grow_param_r_T_34 = _grow_param_r_T == 4'h5; // @[Misc.scala:49:20]
wire [1:0] _grow_param_r_T_36 = _grow_param_r_T_34 ? 2'h2 : _grow_param_r_T_33; // @[Misc.scala:35:36, :49:20]
wire _grow_param_r_T_37 = _grow_param_r_T == 4'h0; // @[Misc.scala:49:20]
wire [1:0] _grow_param_r_T_39 = _grow_param_r_T_37 ? 2'h0 : _grow_param_r_T_36; // @[Misc.scala:35:36, :49:20]
wire _grow_param_r_T_40 = _grow_param_r_T == 4'hE; // @[Misc.scala:49:20]
wire _grow_param_r_T_41 = _grow_param_r_T_40; // @[Misc.scala:35:9, :49:20]
wire [1:0] _grow_param_r_T_42 = _grow_param_r_T_40 ? 2'h3 : _grow_param_r_T_39; // @[Misc.scala:35:36, :49:20]
wire _grow_param_r_T_43 = &_grow_param_r_T; // @[Misc.scala:49:20]
wire _grow_param_r_T_44 = _grow_param_r_T_43 | _grow_param_r_T_41; // @[Misc.scala:35:9, :49:20]
wire [1:0] _grow_param_r_T_45 = _grow_param_r_T_43 ? 2'h3 : _grow_param_r_T_42; // @[Misc.scala:35:36, :49:20]
wire _grow_param_r_T_46 = _grow_param_r_T == 4'h6; // @[Misc.scala:49:20]
wire _grow_param_r_T_47 = _grow_param_r_T_46 | _grow_param_r_T_44; // @[Misc.scala:35:9, :49:20]
wire [1:0] _grow_param_r_T_48 = _grow_param_r_T_46 ? 2'h2 : _grow_param_r_T_45; // @[Misc.scala:35:36, :49:20]
wire _grow_param_r_T_49 = _grow_param_r_T == 4'h7; // @[Misc.scala:49:20]
wire _grow_param_r_T_50 = _grow_param_r_T_49 | _grow_param_r_T_47; // @[Misc.scala:35:9, :49:20]
wire [1:0] _grow_param_r_T_51 = _grow_param_r_T_49 ? 2'h3 : _grow_param_r_T_48; // @[Misc.scala:35:36, :49:20]
wire _grow_param_r_T_52 = _grow_param_r_T == 4'h1; // @[Misc.scala:49:20]
wire _grow_param_r_T_53 = _grow_param_r_T_52 | _grow_param_r_T_50; // @[Misc.scala:35:9, :49:20]
wire [1:0] _grow_param_r_T_54 = _grow_param_r_T_52 ? 2'h1 : _grow_param_r_T_51; // @[Misc.scala:35:36, :49:20]
wire _grow_param_r_T_55 = _grow_param_r_T == 4'h2; // @[Misc.scala:49:20]
wire _grow_param_r_T_56 = _grow_param_r_T_55 | _grow_param_r_T_53; // @[Misc.scala:35:9, :49:20]
wire [1:0] _grow_param_r_T_57 = _grow_param_r_T_55 ? 2'h2 : _grow_param_r_T_54; // @[Misc.scala:35:36, :49:20]
wire _grow_param_r_T_58 = _grow_param_r_T == 4'h3; // @[Misc.scala:49:20]
wire grow_param_r_1 = _grow_param_r_T_58 | _grow_param_r_T_56; // @[Misc.scala:35:9, :49:20]
wire [1:0] grow_param = _grow_param_r_T_58 ? 2'h3 : _grow_param_r_T_57; // @[Misc.scala:35:36, :49:20]
wire [1:0] grow_param_meta_state = grow_param; // @[Misc.scala:35:36]
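  // coh_on_grant: coherence state to install once the Grant returns, computed from
  // the command class and io_mem_grant_bits_param; it drives io_commit_coh_state.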
wire _coh_on_grant_c_cat_T_2 = _coh_on_grant_c_cat_T | _coh_on_grant_c_cat_T_1; // @[Consts.scala:90:{32,42,49}]
wire _coh_on_grant_c_cat_T_4 = _coh_on_grant_c_cat_T_2 | _coh_on_grant_c_cat_T_3; // @[Consts.scala:90:{42,59,66}]
wire _coh_on_grant_c_cat_T_9 = _coh_on_grant_c_cat_T_5 | _coh_on_grant_c_cat_T_6; // @[package.scala:16:47, :81:59]
wire _coh_on_grant_c_cat_T_10 = _coh_on_grant_c_cat_T_9 | _coh_on_grant_c_cat_T_7; // @[package.scala:16:47, :81:59]
wire _coh_on_grant_c_cat_T_11 = _coh_on_grant_c_cat_T_10 | _coh_on_grant_c_cat_T_8; // @[package.scala:16:47, :81:59]
wire _coh_on_grant_c_cat_T_17 = _coh_on_grant_c_cat_T_12 | _coh_on_grant_c_cat_T_13; // @[package.scala:16:47, :81:59]
wire _coh_on_grant_c_cat_T_18 = _coh_on_grant_c_cat_T_17 | _coh_on_grant_c_cat_T_14; // @[package.scala:16:47, :81:59]
wire _coh_on_grant_c_cat_T_19 = _coh_on_grant_c_cat_T_18 | _coh_on_grant_c_cat_T_15; // @[package.scala:16:47, :81:59]
wire _coh_on_grant_c_cat_T_20 = _coh_on_grant_c_cat_T_19 | _coh_on_grant_c_cat_T_16; // @[package.scala:16:47, :81:59]
wire _coh_on_grant_c_cat_T_21 = _coh_on_grant_c_cat_T_11 | _coh_on_grant_c_cat_T_20; // @[package.scala:81:59]
wire _coh_on_grant_c_cat_T_22 = _coh_on_grant_c_cat_T_4 | _coh_on_grant_c_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}]
wire _coh_on_grant_c_cat_T_25 = _coh_on_grant_c_cat_T_23 | _coh_on_grant_c_cat_T_24; // @[Consts.scala:90:{32,42,49}]
wire _coh_on_grant_c_cat_T_27 = _coh_on_grant_c_cat_T_25 | _coh_on_grant_c_cat_T_26; // @[Consts.scala:90:{42,59,66}]
wire _coh_on_grant_c_cat_T_32 = _coh_on_grant_c_cat_T_28 | _coh_on_grant_c_cat_T_29; // @[package.scala:16:47, :81:59]
wire _coh_on_grant_c_cat_T_33 = _coh_on_grant_c_cat_T_32 | _coh_on_grant_c_cat_T_30; // @[package.scala:16:47, :81:59]
wire _coh_on_grant_c_cat_T_34 = _coh_on_grant_c_cat_T_33 | _coh_on_grant_c_cat_T_31; // @[package.scala:16:47, :81:59]
wire _coh_on_grant_c_cat_T_40 = _coh_on_grant_c_cat_T_35 | _coh_on_grant_c_cat_T_36; // @[package.scala:16:47, :81:59]
wire _coh_on_grant_c_cat_T_41 = _coh_on_grant_c_cat_T_40 | _coh_on_grant_c_cat_T_37; // @[package.scala:16:47, :81:59]
wire _coh_on_grant_c_cat_T_42 = _coh_on_grant_c_cat_T_41 | _coh_on_grant_c_cat_T_38; // @[package.scala:16:47, :81:59]
wire _coh_on_grant_c_cat_T_43 = _coh_on_grant_c_cat_T_42 | _coh_on_grant_c_cat_T_39; // @[package.scala:16:47, :81:59]
wire _coh_on_grant_c_cat_T_44 = _coh_on_grant_c_cat_T_34 | _coh_on_grant_c_cat_T_43; // @[package.scala:81:59]
wire _coh_on_grant_c_cat_T_45 = _coh_on_grant_c_cat_T_27 | _coh_on_grant_c_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}]
wire _coh_on_grant_c_cat_T_47 = _coh_on_grant_c_cat_T_45 | _coh_on_grant_c_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}]
wire _coh_on_grant_c_cat_T_49 = _coh_on_grant_c_cat_T_47 | _coh_on_grant_c_cat_T_48; // @[Consts.scala:91:{47,64,71}]
wire [1:0] coh_on_grant_c = {_coh_on_grant_c_cat_T_22, _coh_on_grant_c_cat_T_49}; // @[Metadata.scala:29:18]
wire [3:0] _coh_on_grant_T = {coh_on_grant_c, io_mem_grant_bits_param_0}; // @[Metadata.scala:29:18, :84:18]
wire _coh_on_grant_T_9 = _coh_on_grant_T == 4'h1; // @[Metadata.scala:84:{18,38}]
wire [1:0] _coh_on_grant_T_10 = {1'h0, _coh_on_grant_T_9}; // @[Metadata.scala:84:38]
wire _coh_on_grant_T_11 = _coh_on_grant_T == 4'h0; // @[Metadata.scala:84:{18,38}]
wire [1:0] _coh_on_grant_T_12 = _coh_on_grant_T_11 ? 2'h2 : _coh_on_grant_T_10; // @[Metadata.scala:84:38]
wire _coh_on_grant_T_13 = _coh_on_grant_T == 4'h4; // @[Metadata.scala:84:{18,38}]
wire [1:0] _coh_on_grant_T_14 = _coh_on_grant_T_13 ? 2'h2 : _coh_on_grant_T_12; // @[Metadata.scala:84:38]
wire _coh_on_grant_T_15 = _coh_on_grant_T == 4'hC; // @[Metadata.scala:84:{18,38}]
wire [1:0] _coh_on_grant_T_16 = _coh_on_grant_T_15 ? 2'h3 : _coh_on_grant_T_14; // @[Metadata.scala:84:38]
assign coh_on_grant_state = _coh_on_grant_T_16; // @[Metadata.scala:84:38, :160:20]
assign io_commit_coh_state_0 = coh_on_grant_state; // @[Metadata.scala:160:20]
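  // r1_*: another coherence lookup of the latched command against new_coh_state,
  // using the same Misc table pattern as grow_param above.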
wire _r1_c_cat_T_2 = _r1_c_cat_T | _r1_c_cat_T_1; // @[Consts.scala:90:{32,42,49}]
wire _r1_c_cat_T_4 = _r1_c_cat_T_2 | _r1_c_cat_T_3; // @[Consts.scala:90:{42,59,66}]
wire _r1_c_cat_T_9 = _r1_c_cat_T_5 | _r1_c_cat_T_6; // @[package.scala:16:47, :81:59]
wire _r1_c_cat_T_10 = _r1_c_cat_T_9 | _r1_c_cat_T_7; // @[package.scala:16:47, :81:59]
wire _r1_c_cat_T_11 = _r1_c_cat_T_10 | _r1_c_cat_T_8; // @[package.scala:16:47, :81:59]
wire _r1_c_cat_T_17 = _r1_c_cat_T_12 | _r1_c_cat_T_13; // @[package.scala:16:47, :81:59]
wire _r1_c_cat_T_18 = _r1_c_cat_T_17 | _r1_c_cat_T_14; // @[package.scala:16:47, :81:59]
wire _r1_c_cat_T_19 = _r1_c_cat_T_18 | _r1_c_cat_T_15; // @[package.scala:16:47, :81:59]
wire _r1_c_cat_T_20 = _r1_c_cat_T_19 | _r1_c_cat_T_16; // @[package.scala:16:47, :81:59]
wire _r1_c_cat_T_21 = _r1_c_cat_T_11 | _r1_c_cat_T_20; // @[package.scala:81:59]
wire _r1_c_cat_T_22 = _r1_c_cat_T_4 | _r1_c_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}]
wire _r1_c_cat_T_25 = _r1_c_cat_T_23 | _r1_c_cat_T_24; // @[Consts.scala:90:{32,42,49}]
wire _r1_c_cat_T_27 = _r1_c_cat_T_25 | _r1_c_cat_T_26; // @[Consts.scala:90:{42,59,66}]
wire _r1_c_cat_T_32 = _r1_c_cat_T_28 | _r1_c_cat_T_29; // @[package.scala:16:47, :81:59]
wire _r1_c_cat_T_33 = _r1_c_cat_T_32 | _r1_c_cat_T_30; // @[package.scala:16:47, :81:59]
wire _r1_c_cat_T_34 = _r1_c_cat_T_33 | _r1_c_cat_T_31; // @[package.scala:16:47, :81:59]
wire _r1_c_cat_T_40 = _r1_c_cat_T_35 | _r1_c_cat_T_36; // @[package.scala:16:47, :81:59]
wire _r1_c_cat_T_41 = _r1_c_cat_T_40 | _r1_c_cat_T_37; // @[package.scala:16:47, :81:59]
wire _r1_c_cat_T_42 = _r1_c_cat_T_41 | _r1_c_cat_T_38; // @[package.scala:16:47, :81:59]
wire _r1_c_cat_T_43 = _r1_c_cat_T_42 | _r1_c_cat_T_39; // @[package.scala:16:47, :81:59]
wire _r1_c_cat_T_44 = _r1_c_cat_T_34 | _r1_c_cat_T_43; // @[package.scala:81:59]
wire _r1_c_cat_T_45 = _r1_c_cat_T_27 | _r1_c_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}]
wire _r1_c_cat_T_47 = _r1_c_cat_T_45 | _r1_c_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}]
wire _r1_c_cat_T_49 = _r1_c_cat_T_47 | _r1_c_cat_T_48; // @[Consts.scala:91:{47,64,71}]
wire [1:0] r1_c = {_r1_c_cat_T_22, _r1_c_cat_T_49}; // @[Metadata.scala:29:18]
wire [3:0] _r1_T = {r1_c, new_coh_state}; // @[Metadata.scala:29:18, :58:19]
wire _r1_T_25 = _r1_T == 4'hC; // @[Misc.scala:49:20]
wire [1:0] _r1_T_27 = {1'h0, _r1_T_25}; // @[Misc.scala:35:36, :49:20]
wire _r1_T_28 = _r1_T == 4'hD; // @[Misc.scala:49:20]
wire [1:0] _r1_T_30 = _r1_T_28 ? 2'h2 : _r1_T_27; // @[Misc.scala:35:36, :49:20]
wire _r1_T_31 = _r1_T == 4'h4; // @[Misc.scala:49:20]
wire [1:0] _r1_T_33 = _r1_T_31 ? 2'h1 : _r1_T_30; // @[Misc.scala:35:36, :49:20]
wire _r1_T_34 = _r1_T == 4'h5; // @[Misc.scala:49:20]
wire [1:0] _r1_T_36 = _r1_T_34 ? 2'h2 : _r1_T_33; // @[Misc.scala:35:36, :49:20]
wire _r1_T_37 = _r1_T == 4'h0; // @[Misc.scala:49:20]
wire [1:0] _r1_T_39 = _r1_T_37 ? 2'h0 : _r1_T_36; // @[Misc.scala:35:36, :49:20]
wire _r1_T_40 = _r1_T == 4'hE; // @[Misc.scala:49:20]
wire _r1_T_41 = _r1_T_40; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r1_T_42 = _r1_T_40 ? 2'h3 : _r1_T_39; // @[Misc.scala:35:36, :49:20]
wire _r1_T_43 = &_r1_T; // @[Misc.scala:49:20]
wire _r1_T_44 = _r1_T_43 | _r1_T_41; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r1_T_45 = _r1_T_43 ? 2'h3 : _r1_T_42; // @[Misc.scala:35:36, :49:20]
wire _r1_T_46 = _r1_T == 4'h6; // @[Misc.scala:49:20]
wire _r1_T_47 = _r1_T_46 | _r1_T_44; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r1_T_48 = _r1_T_46 ? 2'h2 : _r1_T_45; // @[Misc.scala:35:36, :49:20]
wire _r1_T_49 = _r1_T == 4'h7; // @[Misc.scala:49:20]
wire _r1_T_50 = _r1_T_49 | _r1_T_47; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r1_T_51 = _r1_T_49 ? 2'h3 : _r1_T_48; // @[Misc.scala:35:36, :49:20]
wire _r1_T_52 = _r1_T == 4'h1; // @[Misc.scala:49:20]
wire _r1_T_53 = _r1_T_52 | _r1_T_50; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r1_T_54 = _r1_T_52 ? 2'h1 : _r1_T_51; // @[Misc.scala:35:36, :49:20]
wire _r1_T_55 = _r1_T == 4'h2; // @[Misc.scala:49:20]
wire _r1_T_56 = _r1_T_55 | _r1_T_53; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r1_T_57 = _r1_T_55 ? 2'h2 : _r1_T_54; // @[Misc.scala:35:36, :49:20]
wire _r1_T_58 = _r1_T == 4'h3; // @[Misc.scala:49:20]
wire r1_1 = _r1_T_58 | _r1_T_56; // @[Misc.scala:35:9, :49:20]
wire [1:0] r1_2 = _r1_T_58 ? 2'h3 : _r1_T_57; // @[Misc.scala:35:36, :49:20]
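  // Same command-class decode, now for the incoming request on io_req_uop_mem_cmd;
  // the shared _GEN_* terms fan out to the r2, needs_second_acq, dirties, and
  // state_* expressions that follow.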
wire _GEN_13 = io_req_uop_mem_cmd_0 == 5'h1; // @[Consts.scala:90:32]
wire _r2_c_cat_T; // @[Consts.scala:90:32]
assign _r2_c_cat_T = _GEN_13; // @[Consts.scala:90:32]
wire _r2_c_cat_T_23; // @[Consts.scala:90:32]
assign _r2_c_cat_T_23 = _GEN_13; // @[Consts.scala:90:32]
wire _needs_second_acq_T; // @[Consts.scala:90:32]
assign _needs_second_acq_T = _GEN_13; // @[Consts.scala:90:32]
wire _dirties_cat_T; // @[Consts.scala:90:32]
assign _dirties_cat_T = _GEN_13; // @[Consts.scala:90:32]
wire _dirties_cat_T_23; // @[Consts.scala:90:32]
assign _dirties_cat_T_23 = _GEN_13; // @[Consts.scala:90:32]
wire _state_r_c_cat_T; // @[Consts.scala:90:32]
assign _state_r_c_cat_T = _GEN_13; // @[Consts.scala:90:32]
wire _state_r_c_cat_T_23; // @[Consts.scala:90:32]
assign _state_r_c_cat_T_23 = _GEN_13; // @[Consts.scala:90:32]
wire _state_T_3; // @[Consts.scala:90:32]
assign _state_T_3 = _GEN_13; // @[Consts.scala:90:32]
wire _r_c_cat_T_50; // @[Consts.scala:90:32]
assign _r_c_cat_T_50 = _GEN_13; // @[Consts.scala:90:32]
wire _r_c_cat_T_73; // @[Consts.scala:90:32]
assign _r_c_cat_T_73 = _GEN_13; // @[Consts.scala:90:32]
wire _state_r_c_cat_T_50; // @[Consts.scala:90:32]
assign _state_r_c_cat_T_50 = _GEN_13; // @[Consts.scala:90:32]
wire _state_r_c_cat_T_73; // @[Consts.scala:90:32]
assign _state_r_c_cat_T_73 = _GEN_13; // @[Consts.scala:90:32]
wire _state_T_37; // @[Consts.scala:90:32]
assign _state_T_37 = _GEN_13; // @[Consts.scala:90:32]
wire _GEN_14 = io_req_uop_mem_cmd_0 == 5'h11; // @[Consts.scala:90:49]
wire _r2_c_cat_T_1; // @[Consts.scala:90:49]
assign _r2_c_cat_T_1 = _GEN_14; // @[Consts.scala:90:49]
wire _r2_c_cat_T_24; // @[Consts.scala:90:49]
assign _r2_c_cat_T_24 = _GEN_14; // @[Consts.scala:90:49]
wire _needs_second_acq_T_1; // @[Consts.scala:90:49]
assign _needs_second_acq_T_1 = _GEN_14; // @[Consts.scala:90:49]
wire _dirties_cat_T_1; // @[Consts.scala:90:49]
assign _dirties_cat_T_1 = _GEN_14; // @[Consts.scala:90:49]
wire _dirties_cat_T_24; // @[Consts.scala:90:49]
assign _dirties_cat_T_24 = _GEN_14; // @[Consts.scala:90:49]
wire _state_r_c_cat_T_1; // @[Consts.scala:90:49]
assign _state_r_c_cat_T_1 = _GEN_14; // @[Consts.scala:90:49]
wire _state_r_c_cat_T_24; // @[Consts.scala:90:49]
assign _state_r_c_cat_T_24 = _GEN_14; // @[Consts.scala:90:49]
wire _state_T_4; // @[Consts.scala:90:49]
assign _state_T_4 = _GEN_14; // @[Consts.scala:90:49]
wire _r_c_cat_T_51; // @[Consts.scala:90:49]
assign _r_c_cat_T_51 = _GEN_14; // @[Consts.scala:90:49]
wire _r_c_cat_T_74; // @[Consts.scala:90:49]
assign _r_c_cat_T_74 = _GEN_14; // @[Consts.scala:90:49]
wire _state_r_c_cat_T_51; // @[Consts.scala:90:49]
assign _state_r_c_cat_T_51 = _GEN_14; // @[Consts.scala:90:49]
wire _state_r_c_cat_T_74; // @[Consts.scala:90:49]
assign _state_r_c_cat_T_74 = _GEN_14; // @[Consts.scala:90:49]
wire _state_T_38; // @[Consts.scala:90:49]
assign _state_T_38 = _GEN_14; // @[Consts.scala:90:49]
wire _r2_c_cat_T_2 = _r2_c_cat_T | _r2_c_cat_T_1; // @[Consts.scala:90:{32,42,49}]
wire _GEN_15 = io_req_uop_mem_cmd_0 == 5'h7; // @[Consts.scala:90:66]
wire _r2_c_cat_T_3; // @[Consts.scala:90:66]
assign _r2_c_cat_T_3 = _GEN_15; // @[Consts.scala:90:66]
wire _r2_c_cat_T_26; // @[Consts.scala:90:66]
assign _r2_c_cat_T_26 = _GEN_15; // @[Consts.scala:90:66]
wire _needs_second_acq_T_3; // @[Consts.scala:90:66]
assign _needs_second_acq_T_3 = _GEN_15; // @[Consts.scala:90:66]
wire _dirties_cat_T_3; // @[Consts.scala:90:66]
assign _dirties_cat_T_3 = _GEN_15; // @[Consts.scala:90:66]
wire _dirties_cat_T_26; // @[Consts.scala:90:66]
assign _dirties_cat_T_26 = _GEN_15; // @[Consts.scala:90:66]
wire _state_r_c_cat_T_3; // @[Consts.scala:90:66]
assign _state_r_c_cat_T_3 = _GEN_15; // @[Consts.scala:90:66]
wire _state_r_c_cat_T_26; // @[Consts.scala:90:66]
assign _state_r_c_cat_T_26 = _GEN_15; // @[Consts.scala:90:66]
wire _state_T_6; // @[Consts.scala:90:66]
assign _state_T_6 = _GEN_15; // @[Consts.scala:90:66]
wire _r_c_cat_T_53; // @[Consts.scala:90:66]
assign _r_c_cat_T_53 = _GEN_15; // @[Consts.scala:90:66]
wire _r_c_cat_T_76; // @[Consts.scala:90:66]
assign _r_c_cat_T_76 = _GEN_15; // @[Consts.scala:90:66]
wire _state_r_c_cat_T_53; // @[Consts.scala:90:66]
assign _state_r_c_cat_T_53 = _GEN_15; // @[Consts.scala:90:66]
wire _state_r_c_cat_T_76; // @[Consts.scala:90:66]
assign _state_r_c_cat_T_76 = _GEN_15; // @[Consts.scala:90:66]
wire _state_T_40; // @[Consts.scala:90:66]
assign _state_T_40 = _GEN_15; // @[Consts.scala:90:66]
wire _r2_c_cat_T_4 = _r2_c_cat_T_2 | _r2_c_cat_T_3; // @[Consts.scala:90:{42,59,66}]
wire _GEN_16 = io_req_uop_mem_cmd_0 == 5'h4; // @[package.scala:16:47]
wire _r2_c_cat_T_5; // @[package.scala:16:47]
assign _r2_c_cat_T_5 = _GEN_16; // @[package.scala:16:47]
wire _r2_c_cat_T_28; // @[package.scala:16:47]
assign _r2_c_cat_T_28 = _GEN_16; // @[package.scala:16:47]
wire _needs_second_acq_T_5; // @[package.scala:16:47]
assign _needs_second_acq_T_5 = _GEN_16; // @[package.scala:16:47]
wire _dirties_cat_T_5; // @[package.scala:16:47]
assign _dirties_cat_T_5 = _GEN_16; // @[package.scala:16:47]
wire _dirties_cat_T_28; // @[package.scala:16:47]
assign _dirties_cat_T_28 = _GEN_16; // @[package.scala:16:47]
wire _state_r_c_cat_T_5; // @[package.scala:16:47]
assign _state_r_c_cat_T_5 = _GEN_16; // @[package.scala:16:47]
wire _state_r_c_cat_T_28; // @[package.scala:16:47]
assign _state_r_c_cat_T_28 = _GEN_16; // @[package.scala:16:47]
wire _state_T_8; // @[package.scala:16:47]
assign _state_T_8 = _GEN_16; // @[package.scala:16:47]
wire _r_c_cat_T_55; // @[package.scala:16:47]
assign _r_c_cat_T_55 = _GEN_16; // @[package.scala:16:47]
wire _r_c_cat_T_78; // @[package.scala:16:47]
assign _r_c_cat_T_78 = _GEN_16; // @[package.scala:16:47]
wire _state_r_c_cat_T_55; // @[package.scala:16:47]
assign _state_r_c_cat_T_55 = _GEN_16; // @[package.scala:16:47]
wire _state_r_c_cat_T_78; // @[package.scala:16:47]
assign _state_r_c_cat_T_78 = _GEN_16; // @[package.scala:16:47]
wire _state_T_42; // @[package.scala:16:47]
assign _state_T_42 = _GEN_16; // @[package.scala:16:47]
wire _GEN_17 = io_req_uop_mem_cmd_0 == 5'h9; // @[package.scala:16:47]
wire _r2_c_cat_T_6; // @[package.scala:16:47]
assign _r2_c_cat_T_6 = _GEN_17; // @[package.scala:16:47]
wire _r2_c_cat_T_29; // @[package.scala:16:47]
assign _r2_c_cat_T_29 = _GEN_17; // @[package.scala:16:47]
wire _needs_second_acq_T_6; // @[package.scala:16:47]
assign _needs_second_acq_T_6 = _GEN_17; // @[package.scala:16:47]
wire _dirties_cat_T_6; // @[package.scala:16:47]
assign _dirties_cat_T_6 = _GEN_17; // @[package.scala:16:47]
wire _dirties_cat_T_29; // @[package.scala:16:47]
assign _dirties_cat_T_29 = _GEN_17; // @[package.scala:16:47]
wire _state_r_c_cat_T_6; // @[package.scala:16:47]
assign _state_r_c_cat_T_6 = _GEN_17; // @[package.scala:16:47]
wire _state_r_c_cat_T_29; // @[package.scala:16:47]
assign _state_r_c_cat_T_29 = _GEN_17; // @[package.scala:16:47]
wire _state_T_9; // @[package.scala:16:47]
assign _state_T_9 = _GEN_17; // @[package.scala:16:47]
wire _r_c_cat_T_56; // @[package.scala:16:47]
assign _r_c_cat_T_56 = _GEN_17; // @[package.scala:16:47]
wire _r_c_cat_T_79; // @[package.scala:16:47]
assign _r_c_cat_T_79 = _GEN_17; // @[package.scala:16:47]
wire _state_r_c_cat_T_56; // @[package.scala:16:47]
assign _state_r_c_cat_T_56 = _GEN_17; // @[package.scala:16:47]
wire _state_r_c_cat_T_79; // @[package.scala:16:47]
assign _state_r_c_cat_T_79 = _GEN_17; // @[package.scala:16:47]
wire _state_T_43; // @[package.scala:16:47]
assign _state_T_43 = _GEN_17; // @[package.scala:16:47]
wire _GEN_18 = io_req_uop_mem_cmd_0 == 5'hA; // @[package.scala:16:47]
wire _r2_c_cat_T_7; // @[package.scala:16:47]
assign _r2_c_cat_T_7 = _GEN_18; // @[package.scala:16:47]
wire _r2_c_cat_T_30; // @[package.scala:16:47]
assign _r2_c_cat_T_30 = _GEN_18; // @[package.scala:16:47]
wire _needs_second_acq_T_7; // @[package.scala:16:47]
assign _needs_second_acq_T_7 = _GEN_18; // @[package.scala:16:47]
wire _dirties_cat_T_7; // @[package.scala:16:47]
assign _dirties_cat_T_7 = _GEN_18; // @[package.scala:16:47]
wire _dirties_cat_T_30; // @[package.scala:16:47]
assign _dirties_cat_T_30 = _GEN_18; // @[package.scala:16:47]
wire _state_r_c_cat_T_7; // @[package.scala:16:47]
assign _state_r_c_cat_T_7 = _GEN_18; // @[package.scala:16:47]
wire _state_r_c_cat_T_30; // @[package.scala:16:47]
assign _state_r_c_cat_T_30 = _GEN_18; // @[package.scala:16:47]
wire _state_T_10; // @[package.scala:16:47]
assign _state_T_10 = _GEN_18; // @[package.scala:16:47]
wire _r_c_cat_T_57; // @[package.scala:16:47]
assign _r_c_cat_T_57 = _GEN_18; // @[package.scala:16:47]
wire _r_c_cat_T_80; // @[package.scala:16:47]
assign _r_c_cat_T_80 = _GEN_18; // @[package.scala:16:47]
wire _state_r_c_cat_T_57; // @[package.scala:16:47]
assign _state_r_c_cat_T_57 = _GEN_18; // @[package.scala:16:47]
wire _state_r_c_cat_T_80; // @[package.scala:16:47]
assign _state_r_c_cat_T_80 = _GEN_18; // @[package.scala:16:47]
wire _state_T_44; // @[package.scala:16:47]
assign _state_T_44 = _GEN_18; // @[package.scala:16:47]
wire _GEN_19 = io_req_uop_mem_cmd_0 == 5'hB; // @[package.scala:16:47]
wire _r2_c_cat_T_8; // @[package.scala:16:47]
assign _r2_c_cat_T_8 = _GEN_19; // @[package.scala:16:47]
wire _r2_c_cat_T_31; // @[package.scala:16:47]
assign _r2_c_cat_T_31 = _GEN_19; // @[package.scala:16:47]
wire _needs_second_acq_T_8; // @[package.scala:16:47]
assign _needs_second_acq_T_8 = _GEN_19; // @[package.scala:16:47]
wire _dirties_cat_T_8; // @[package.scala:16:47]
assign _dirties_cat_T_8 = _GEN_19; // @[package.scala:16:47]
wire _dirties_cat_T_31; // @[package.scala:16:47]
assign _dirties_cat_T_31 = _GEN_19; // @[package.scala:16:47]
wire _state_r_c_cat_T_8; // @[package.scala:16:47]
assign _state_r_c_cat_T_8 = _GEN_19; // @[package.scala:16:47]
wire _state_r_c_cat_T_31; // @[package.scala:16:47]
assign _state_r_c_cat_T_31 = _GEN_19; // @[package.scala:16:47]
wire _state_T_11; // @[package.scala:16:47]
assign _state_T_11 = _GEN_19; // @[package.scala:16:47]
wire _r_c_cat_T_58; // @[package.scala:16:47]
assign _r_c_cat_T_58 = _GEN_19; // @[package.scala:16:47]
wire _r_c_cat_T_81; // @[package.scala:16:47]
assign _r_c_cat_T_81 = _GEN_19; // @[package.scala:16:47]
wire _state_r_c_cat_T_58; // @[package.scala:16:47]
assign _state_r_c_cat_T_58 = _GEN_19; // @[package.scala:16:47]
wire _state_r_c_cat_T_81; // @[package.scala:16:47]
assign _state_r_c_cat_T_81 = _GEN_19; // @[package.scala:16:47]
wire _state_T_45; // @[package.scala:16:47]
assign _state_T_45 = _GEN_19; // @[package.scala:16:47]
wire _r2_c_cat_T_9 = _r2_c_cat_T_5 | _r2_c_cat_T_6; // @[package.scala:16:47, :81:59]
wire _r2_c_cat_T_10 = _r2_c_cat_T_9 | _r2_c_cat_T_7; // @[package.scala:16:47, :81:59]
wire _r2_c_cat_T_11 = _r2_c_cat_T_10 | _r2_c_cat_T_8; // @[package.scala:16:47, :81:59]
wire _GEN_20 = io_req_uop_mem_cmd_0 == 5'h8; // @[package.scala:16:47]
wire _r2_c_cat_T_12; // @[package.scala:16:47]
assign _r2_c_cat_T_12 = _GEN_20; // @[package.scala:16:47]
wire _r2_c_cat_T_35; // @[package.scala:16:47]
assign _r2_c_cat_T_35 = _GEN_20; // @[package.scala:16:47]
wire _needs_second_acq_T_12; // @[package.scala:16:47]
assign _needs_second_acq_T_12 = _GEN_20; // @[package.scala:16:47]
wire _dirties_cat_T_12; // @[package.scala:16:47]
assign _dirties_cat_T_12 = _GEN_20; // @[package.scala:16:47]
wire _dirties_cat_T_35; // @[package.scala:16:47]
assign _dirties_cat_T_35 = _GEN_20; // @[package.scala:16:47]
wire _state_r_c_cat_T_12; // @[package.scala:16:47]
assign _state_r_c_cat_T_12 = _GEN_20; // @[package.scala:16:47]
wire _state_r_c_cat_T_35; // @[package.scala:16:47]
assign _state_r_c_cat_T_35 = _GEN_20; // @[package.scala:16:47]
wire _state_T_15; // @[package.scala:16:47]
assign _state_T_15 = _GEN_20; // @[package.scala:16:47]
wire _r_c_cat_T_62; // @[package.scala:16:47]
assign _r_c_cat_T_62 = _GEN_20; // @[package.scala:16:47]
wire _r_c_cat_T_85; // @[package.scala:16:47]
assign _r_c_cat_T_85 = _GEN_20; // @[package.scala:16:47]
wire _state_r_c_cat_T_62; // @[package.scala:16:47]
assign _state_r_c_cat_T_62 = _GEN_20; // @[package.scala:16:47]
wire _state_r_c_cat_T_85; // @[package.scala:16:47]
assign _state_r_c_cat_T_85 = _GEN_20; // @[package.scala:16:47]
wire _state_T_49; // @[package.scala:16:47]
assign _state_T_49 = _GEN_20; // @[package.scala:16:47]
wire _GEN_21 = io_req_uop_mem_cmd_0 == 5'hC; // @[package.scala:16:47]
wire _r2_c_cat_T_13; // @[package.scala:16:47]
assign _r2_c_cat_T_13 = _GEN_21; // @[package.scala:16:47]
wire _r2_c_cat_T_36; // @[package.scala:16:47]
assign _r2_c_cat_T_36 = _GEN_21; // @[package.scala:16:47]
wire _needs_second_acq_T_13; // @[package.scala:16:47]
assign _needs_second_acq_T_13 = _GEN_21; // @[package.scala:16:47]
wire _dirties_cat_T_13; // @[package.scala:16:47]
assign _dirties_cat_T_13 = _GEN_21; // @[package.scala:16:47]
wire _dirties_cat_T_36; // @[package.scala:16:47]
assign _dirties_cat_T_36 = _GEN_21; // @[package.scala:16:47]
wire _state_r_c_cat_T_13; // @[package.scala:16:47]
assign _state_r_c_cat_T_13 = _GEN_21; // @[package.scala:16:47]
wire _state_r_c_cat_T_36; // @[package.scala:16:47]
assign _state_r_c_cat_T_36 = _GEN_21; // @[package.scala:16:47]
wire _state_T_16; // @[package.scala:16:47]
assign _state_T_16 = _GEN_21; // @[package.scala:16:47]
wire _r_c_cat_T_63; // @[package.scala:16:47]
assign _r_c_cat_T_63 = _GEN_21; // @[package.scala:16:47]
wire _r_c_cat_T_86; // @[package.scala:16:47]
assign _r_c_cat_T_86 = _GEN_21; // @[package.scala:16:47]
wire _state_r_c_cat_T_63; // @[package.scala:16:47]
assign _state_r_c_cat_T_63 = _GEN_21; // @[package.scala:16:47]
wire _state_r_c_cat_T_86; // @[package.scala:16:47]
assign _state_r_c_cat_T_86 = _GEN_21; // @[package.scala:16:47]
wire _state_T_50; // @[package.scala:16:47]
assign _state_T_50 = _GEN_21; // @[package.scala:16:47]
wire _GEN_22 = io_req_uop_mem_cmd_0 == 5'hD; // @[package.scala:16:47]
wire _r2_c_cat_T_14; // @[package.scala:16:47]
assign _r2_c_cat_T_14 = _GEN_22; // @[package.scala:16:47]
wire _r2_c_cat_T_37; // @[package.scala:16:47]
assign _r2_c_cat_T_37 = _GEN_22; // @[package.scala:16:47]
wire _needs_second_acq_T_14; // @[package.scala:16:47]
assign _needs_second_acq_T_14 = _GEN_22; // @[package.scala:16:47]
wire _dirties_cat_T_14; // @[package.scala:16:47]
assign _dirties_cat_T_14 = _GEN_22; // @[package.scala:16:47]
wire _dirties_cat_T_37; // @[package.scala:16:47]
assign _dirties_cat_T_37 = _GEN_22; // @[package.scala:16:47]
wire _state_r_c_cat_T_14; // @[package.scala:16:47]
assign _state_r_c_cat_T_14 = _GEN_22; // @[package.scala:16:47]
wire _state_r_c_cat_T_37; // @[package.scala:16:47]
assign _state_r_c_cat_T_37 = _GEN_22; // @[package.scala:16:47]
wire _state_T_17; // @[package.scala:16:47]
assign _state_T_17 = _GEN_22; // @[package.scala:16:47]
wire _r_c_cat_T_64; // @[package.scala:16:47]
assign _r_c_cat_T_64 = _GEN_22; // @[package.scala:16:47]
wire _r_c_cat_T_87; // @[package.scala:16:47]
assign _r_c_cat_T_87 = _GEN_22; // @[package.scala:16:47]
wire _state_r_c_cat_T_64; // @[package.scala:16:47]
assign _state_r_c_cat_T_64 = _GEN_22; // @[package.scala:16:47]
wire _state_r_c_cat_T_87; // @[package.scala:16:47]
assign _state_r_c_cat_T_87 = _GEN_22; // @[package.scala:16:47]
wire _state_T_51; // @[package.scala:16:47]
assign _state_T_51 = _GEN_22; // @[package.scala:16:47]
wire _GEN_23 = io_req_uop_mem_cmd_0 == 5'hE; // @[package.scala:16:47]
wire _r2_c_cat_T_15; // @[package.scala:16:47]
assign _r2_c_cat_T_15 = _GEN_23; // @[package.scala:16:47]
wire _r2_c_cat_T_38; // @[package.scala:16:47]
assign _r2_c_cat_T_38 = _GEN_23; // @[package.scala:16:47]
wire _needs_second_acq_T_15; // @[package.scala:16:47]
assign _needs_second_acq_T_15 = _GEN_23; // @[package.scala:16:47]
wire _dirties_cat_T_15; // @[package.scala:16:47]
assign _dirties_cat_T_15 = _GEN_23; // @[package.scala:16:47]
wire _dirties_cat_T_38; // @[package.scala:16:47]
assign _dirties_cat_T_38 = _GEN_23; // @[package.scala:16:47]
wire _state_r_c_cat_T_15; // @[package.scala:16:47]
assign _state_r_c_cat_T_15 = _GEN_23; // @[package.scala:16:47]
wire _state_r_c_cat_T_38; // @[package.scala:16:47]
assign _state_r_c_cat_T_38 = _GEN_23; // @[package.scala:16:47]
wire _state_T_18; // @[package.scala:16:47]
assign _state_T_18 = _GEN_23; // @[package.scala:16:47]
wire _r_c_cat_T_65; // @[package.scala:16:47]
assign _r_c_cat_T_65 = _GEN_23; // @[package.scala:16:47]
wire _r_c_cat_T_88; // @[package.scala:16:47]
assign _r_c_cat_T_88 = _GEN_23; // @[package.scala:16:47]
wire _state_r_c_cat_T_65; // @[package.scala:16:47]
assign _state_r_c_cat_T_65 = _GEN_23; // @[package.scala:16:47]
wire _state_r_c_cat_T_88; // @[package.scala:16:47]
assign _state_r_c_cat_T_88 = _GEN_23; // @[package.scala:16:47]
wire _state_T_52; // @[package.scala:16:47]
assign _state_T_52 = _GEN_23; // @[package.scala:16:47]
wire _GEN_24 = io_req_uop_mem_cmd_0 == 5'hF; // @[package.scala:16:47]
wire _r2_c_cat_T_16; // @[package.scala:16:47]
assign _r2_c_cat_T_16 = _GEN_24; // @[package.scala:16:47]
wire _r2_c_cat_T_39; // @[package.scala:16:47]
assign _r2_c_cat_T_39 = _GEN_24; // @[package.scala:16:47]
wire _needs_second_acq_T_16; // @[package.scala:16:47]
assign _needs_second_acq_T_16 = _GEN_24; // @[package.scala:16:47]
wire _dirties_cat_T_16; // @[package.scala:16:47]
assign _dirties_cat_T_16 = _GEN_24; // @[package.scala:16:47]
wire _dirties_cat_T_39; // @[package.scala:16:47]
assign _dirties_cat_T_39 = _GEN_24; // @[package.scala:16:47]
wire _state_r_c_cat_T_16; // @[package.scala:16:47]
assign _state_r_c_cat_T_16 = _GEN_24; // @[package.scala:16:47]
wire _state_r_c_cat_T_39; // @[package.scala:16:47]
assign _state_r_c_cat_T_39 = _GEN_24; // @[package.scala:16:47]
wire _state_T_19; // @[package.scala:16:47]
assign _state_T_19 = _GEN_24; // @[package.scala:16:47]
wire _r_c_cat_T_66; // @[package.scala:16:47]
assign _r_c_cat_T_66 = _GEN_24; // @[package.scala:16:47]
wire _r_c_cat_T_89; // @[package.scala:16:47]
assign _r_c_cat_T_89 = _GEN_24; // @[package.scala:16:47]
wire _state_r_c_cat_T_66; // @[package.scala:16:47]
assign _state_r_c_cat_T_66 = _GEN_24; // @[package.scala:16:47]
wire _state_r_c_cat_T_89; // @[package.scala:16:47]
assign _state_r_c_cat_T_89 = _GEN_24; // @[package.scala:16:47]
wire _state_T_53; // @[package.scala:16:47]
assign _state_T_53 = _GEN_24; // @[package.scala:16:47]
wire _r2_c_cat_T_17 = _r2_c_cat_T_12 | _r2_c_cat_T_13; // @[package.scala:16:47, :81:59]
wire _r2_c_cat_T_18 = _r2_c_cat_T_17 | _r2_c_cat_T_14; // @[package.scala:16:47, :81:59]
wire _r2_c_cat_T_19 = _r2_c_cat_T_18 | _r2_c_cat_T_15; // @[package.scala:16:47, :81:59]
wire _r2_c_cat_T_20 = _r2_c_cat_T_19 | _r2_c_cat_T_16; // @[package.scala:16:47, :81:59]
wire _r2_c_cat_T_21 = _r2_c_cat_T_11 | _r2_c_cat_T_20; // @[package.scala:81:59]
wire _r2_c_cat_T_22 = _r2_c_cat_T_4 | _r2_c_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}]
wire _r2_c_cat_T_25 = _r2_c_cat_T_23 | _r2_c_cat_T_24; // @[Consts.scala:90:{32,42,49}]
wire _r2_c_cat_T_27 = _r2_c_cat_T_25 | _r2_c_cat_T_26; // @[Consts.scala:90:{42,59,66}]
wire _r2_c_cat_T_32 = _r2_c_cat_T_28 | _r2_c_cat_T_29; // @[package.scala:16:47, :81:59]
wire _r2_c_cat_T_33 = _r2_c_cat_T_32 | _r2_c_cat_T_30; // @[package.scala:16:47, :81:59]
wire _r2_c_cat_T_34 = _r2_c_cat_T_33 | _r2_c_cat_T_31; // @[package.scala:16:47, :81:59]
wire _r2_c_cat_T_40 = _r2_c_cat_T_35 | _r2_c_cat_T_36; // @[package.scala:16:47, :81:59]
wire _r2_c_cat_T_41 = _r2_c_cat_T_40 | _r2_c_cat_T_37; // @[package.scala:16:47, :81:59]
wire _r2_c_cat_T_42 = _r2_c_cat_T_41 | _r2_c_cat_T_38; // @[package.scala:16:47, :81:59]
wire _r2_c_cat_T_43 = _r2_c_cat_T_42 | _r2_c_cat_T_39; // @[package.scala:16:47, :81:59]
wire _r2_c_cat_T_44 = _r2_c_cat_T_34 | _r2_c_cat_T_43; // @[package.scala:81:59]
wire _r2_c_cat_T_45 = _r2_c_cat_T_27 | _r2_c_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}]
wire _GEN_25 = io_req_uop_mem_cmd_0 == 5'h3; // @[Consts.scala:91:54]
wire _r2_c_cat_T_46; // @[Consts.scala:91:54]
assign _r2_c_cat_T_46 = _GEN_25; // @[Consts.scala:91:54]
wire _needs_second_acq_T_23; // @[Consts.scala:91:54]
assign _needs_second_acq_T_23 = _GEN_25; // @[Consts.scala:91:54]
wire _dirties_cat_T_46; // @[Consts.scala:91:54]
assign _dirties_cat_T_46 = _GEN_25; // @[Consts.scala:91:54]
wire _rpq_io_enq_valid_T_4; // @[Consts.scala:88:52]
assign _rpq_io_enq_valid_T_4 = _GEN_25; // @[Consts.scala:88:52, :91:54]
wire _state_r_c_cat_T_46; // @[Consts.scala:91:54]
assign _state_r_c_cat_T_46 = _GEN_25; // @[Consts.scala:91:54]
wire _r_c_cat_T_96; // @[Consts.scala:91:54]
assign _r_c_cat_T_96 = _GEN_25; // @[Consts.scala:91:54]
wire _state_r_c_cat_T_96; // @[Consts.scala:91:54]
assign _state_r_c_cat_T_96 = _GEN_25; // @[Consts.scala:91:54]
wire _r2_c_cat_T_47 = _r2_c_cat_T_45 | _r2_c_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}]
wire _GEN_26 = io_req_uop_mem_cmd_0 == 5'h6; // @[Consts.scala:91:71]
wire _r2_c_cat_T_48; // @[Consts.scala:91:71]
assign _r2_c_cat_T_48 = _GEN_26; // @[Consts.scala:91:71]
wire _needs_second_acq_T_25; // @[Consts.scala:91:71]
assign _needs_second_acq_T_25 = _GEN_26; // @[Consts.scala:91:71]
wire _dirties_cat_T_48; // @[Consts.scala:91:71]
assign _dirties_cat_T_48 = _GEN_26; // @[Consts.scala:91:71]
wire _state_r_c_cat_T_48; // @[Consts.scala:91:71]
assign _state_r_c_cat_T_48 = _GEN_26; // @[Consts.scala:91:71]
wire _r_c_cat_T_98; // @[Consts.scala:91:71]
assign _r_c_cat_T_98 = _GEN_26; // @[Consts.scala:91:71]
wire _state_r_c_cat_T_98; // @[Consts.scala:91:71]
assign _state_r_c_cat_T_98 = _GEN_26; // @[Consts.scala:91:71]
wire _r2_c_cat_T_49 = _r2_c_cat_T_47 | _r2_c_cat_T_48; // @[Consts.scala:91:{47,64,71}]
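  // The one-hot compares above decode io_req_uop_mem_cmd against the write-class memory
  // commands (stores, store-conditionals and the AMO groups, per the Consts.scala:90 and
  // package.scala isOneOf locators) plus the write-intent commands 5'h3 (M_PFW) and
  // 5'h6 (M_XLR) from Consts.scala:91; the _r2_c_*, _needs_second_acq_*, _dirties_* and
  // _state_* copies fan the same comparisons out to the expressions that reuse them.
  // Below, r2_c = {isWrite, isWriteIntent} of the incoming command and _r2_T = {r2_c,
  // new_coh_state} index a ClientMetadata.onAccess-style table (Misc.scala:49), yielding
  // r2_1 (the access would hit with current permissions) and r2_2 (next state or grow param).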
wire [1:0] r2_c = {_r2_c_cat_T_22, _r2_c_cat_T_49}; // @[Metadata.scala:29:18]
wire [3:0] _r2_T = {r2_c, new_coh_state}; // @[Metadata.scala:29:18, :58:19]
wire _r2_T_25 = _r2_T == 4'hC; // @[Misc.scala:49:20]
wire [1:0] _r2_T_27 = {1'h0, _r2_T_25}; // @[Misc.scala:35:36, :49:20]
wire _r2_T_28 = _r2_T == 4'hD; // @[Misc.scala:49:20]
wire [1:0] _r2_T_30 = _r2_T_28 ? 2'h2 : _r2_T_27; // @[Misc.scala:35:36, :49:20]
wire _r2_T_31 = _r2_T == 4'h4; // @[Misc.scala:49:20]
wire [1:0] _r2_T_33 = _r2_T_31 ? 2'h1 : _r2_T_30; // @[Misc.scala:35:36, :49:20]
wire _r2_T_34 = _r2_T == 4'h5; // @[Misc.scala:49:20]
wire [1:0] _r2_T_36 = _r2_T_34 ? 2'h2 : _r2_T_33; // @[Misc.scala:35:36, :49:20]
wire _r2_T_37 = _r2_T == 4'h0; // @[Misc.scala:49:20]
wire [1:0] _r2_T_39 = _r2_T_37 ? 2'h0 : _r2_T_36; // @[Misc.scala:35:36, :49:20]
wire _r2_T_40 = _r2_T == 4'hE; // @[Misc.scala:49:20]
wire _r2_T_41 = _r2_T_40; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r2_T_42 = _r2_T_40 ? 2'h3 : _r2_T_39; // @[Misc.scala:35:36, :49:20]
wire _r2_T_43 = &_r2_T; // @[Misc.scala:49:20]
wire _r2_T_44 = _r2_T_43 | _r2_T_41; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r2_T_45 = _r2_T_43 ? 2'h3 : _r2_T_42; // @[Misc.scala:35:36, :49:20]
wire _r2_T_46 = _r2_T == 4'h6; // @[Misc.scala:49:20]
wire _r2_T_47 = _r2_T_46 | _r2_T_44; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r2_T_48 = _r2_T_46 ? 2'h2 : _r2_T_45; // @[Misc.scala:35:36, :49:20]
wire _r2_T_49 = _r2_T == 4'h7; // @[Misc.scala:49:20]
wire _r2_T_50 = _r2_T_49 | _r2_T_47; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r2_T_51 = _r2_T_49 ? 2'h3 : _r2_T_48; // @[Misc.scala:35:36, :49:20]
wire _r2_T_52 = _r2_T == 4'h1; // @[Misc.scala:49:20]
wire _r2_T_53 = _r2_T_52 | _r2_T_50; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r2_T_54 = _r2_T_52 ? 2'h1 : _r2_T_51; // @[Misc.scala:35:36, :49:20]
wire _r2_T_55 = _r2_T == 4'h2; // @[Misc.scala:49:20]
wire _r2_T_56 = _r2_T_55 | _r2_T_53; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r2_T_57 = _r2_T_55 ? 2'h2 : _r2_T_54; // @[Misc.scala:35:36, :49:20]
wire _r2_T_58 = _r2_T == 4'h3; // @[Misc.scala:49:20]
wire r2_1 = _r2_T_58 | _r2_T_56; // @[Misc.scala:35:9, :49:20]
wire [1:0] r2_2 = _r2_T_58 ? 2'h3 : _r2_T_57; // @[Misc.scala:35:36, :49:20]
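  // cmd_requires_second_acquire (Metadata.scala:104, onSecondaryAccess): the merged request
  // wants write intent (_needs_second_acq_T_26, computed over io_req_uop_mem_cmd) while the
  // original miss did not (_needs_second_acq_T_53 over req_uop_mem_cmd, negated), so the
  // MSHR would have to upgrade the permissions it is already acquiring.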
wire _needs_second_acq_T_2 = _needs_second_acq_T | _needs_second_acq_T_1; // @[Consts.scala:90:{32,42,49}]
wire _needs_second_acq_T_4 = _needs_second_acq_T_2 | _needs_second_acq_T_3; // @[Consts.scala:90:{42,59,66}]
wire _needs_second_acq_T_9 = _needs_second_acq_T_5 | _needs_second_acq_T_6; // @[package.scala:16:47, :81:59]
wire _needs_second_acq_T_10 = _needs_second_acq_T_9 | _needs_second_acq_T_7; // @[package.scala:16:47, :81:59]
wire _needs_second_acq_T_11 = _needs_second_acq_T_10 | _needs_second_acq_T_8; // @[package.scala:16:47, :81:59]
wire _needs_second_acq_T_17 = _needs_second_acq_T_12 | _needs_second_acq_T_13; // @[package.scala:16:47, :81:59]
wire _needs_second_acq_T_18 = _needs_second_acq_T_17 | _needs_second_acq_T_14; // @[package.scala:16:47, :81:59]
wire _needs_second_acq_T_19 = _needs_second_acq_T_18 | _needs_second_acq_T_15; // @[package.scala:16:47, :81:59]
wire _needs_second_acq_T_20 = _needs_second_acq_T_19 | _needs_second_acq_T_16; // @[package.scala:16:47, :81:59]
wire _needs_second_acq_T_21 = _needs_second_acq_T_11 | _needs_second_acq_T_20; // @[package.scala:81:59]
wire _needs_second_acq_T_22 = _needs_second_acq_T_4 | _needs_second_acq_T_21; // @[Consts.scala:87:44, :90:{59,76}]
wire _needs_second_acq_T_24 = _needs_second_acq_T_22 | _needs_second_acq_T_23; // @[Consts.scala:90:76, :91:{47,54}]
wire _needs_second_acq_T_26 = _needs_second_acq_T_24 | _needs_second_acq_T_25; // @[Consts.scala:91:{47,64,71}]
wire _needs_second_acq_T_29 = _needs_second_acq_T_27 | _needs_second_acq_T_28; // @[Consts.scala:90:{32,42,49}]
wire _needs_second_acq_T_31 = _needs_second_acq_T_29 | _needs_second_acq_T_30; // @[Consts.scala:90:{42,59,66}]
wire _needs_second_acq_T_36 = _needs_second_acq_T_32 | _needs_second_acq_T_33; // @[package.scala:16:47, :81:59]
wire _needs_second_acq_T_37 = _needs_second_acq_T_36 | _needs_second_acq_T_34; // @[package.scala:16:47, :81:59]
wire _needs_second_acq_T_38 = _needs_second_acq_T_37 | _needs_second_acq_T_35; // @[package.scala:16:47, :81:59]
wire _needs_second_acq_T_44 = _needs_second_acq_T_39 | _needs_second_acq_T_40; // @[package.scala:16:47, :81:59]
wire _needs_second_acq_T_45 = _needs_second_acq_T_44 | _needs_second_acq_T_41; // @[package.scala:16:47, :81:59]
wire _needs_second_acq_T_46 = _needs_second_acq_T_45 | _needs_second_acq_T_42; // @[package.scala:16:47, :81:59]
wire _needs_second_acq_T_47 = _needs_second_acq_T_46 | _needs_second_acq_T_43; // @[package.scala:16:47, :81:59]
wire _needs_second_acq_T_48 = _needs_second_acq_T_38 | _needs_second_acq_T_47; // @[package.scala:81:59]
wire _needs_second_acq_T_49 = _needs_second_acq_T_31 | _needs_second_acq_T_48; // @[Consts.scala:87:44, :90:{59,76}]
wire _needs_second_acq_T_51 = _needs_second_acq_T_49 | _needs_second_acq_T_50; // @[Consts.scala:90:76, :91:{47,54}]
wire _needs_second_acq_T_53 = _needs_second_acq_T_51 | _needs_second_acq_T_52; // @[Consts.scala:91:{47,64,71}]
wire _needs_second_acq_T_54 = ~_needs_second_acq_T_53; // @[Metadata.scala:104:57]
wire cmd_requires_second_acquire = _needs_second_acq_T_26 & _needs_second_acq_T_54; // @[Metadata.scala:104:{54,57}]
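  // Remainder of the onSecondaryAccess bookkeeping: is_hit_again notes that both the
  // original and the merged command would hit, dirties is set when the merged command is a
  // full write (both bits of its {isWrite, isWriteIntent} category), and in that case the
  // merged command and its grow param / next state are kept as dirtier_cmd / dirtier_coh_state.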
wire is_hit_again = r1_1 & r2_1; // @[Misc.scala:35:9]
wire _dirties_cat_T_2 = _dirties_cat_T | _dirties_cat_T_1; // @[Consts.scala:90:{32,42,49}]
wire _dirties_cat_T_4 = _dirties_cat_T_2 | _dirties_cat_T_3; // @[Consts.scala:90:{42,59,66}]
wire _dirties_cat_T_9 = _dirties_cat_T_5 | _dirties_cat_T_6; // @[package.scala:16:47, :81:59]
wire _dirties_cat_T_10 = _dirties_cat_T_9 | _dirties_cat_T_7; // @[package.scala:16:47, :81:59]
wire _dirties_cat_T_11 = _dirties_cat_T_10 | _dirties_cat_T_8; // @[package.scala:16:47, :81:59]
wire _dirties_cat_T_17 = _dirties_cat_T_12 | _dirties_cat_T_13; // @[package.scala:16:47, :81:59]
wire _dirties_cat_T_18 = _dirties_cat_T_17 | _dirties_cat_T_14; // @[package.scala:16:47, :81:59]
wire _dirties_cat_T_19 = _dirties_cat_T_18 | _dirties_cat_T_15; // @[package.scala:16:47, :81:59]
wire _dirties_cat_T_20 = _dirties_cat_T_19 | _dirties_cat_T_16; // @[package.scala:16:47, :81:59]
wire _dirties_cat_T_21 = _dirties_cat_T_11 | _dirties_cat_T_20; // @[package.scala:81:59]
wire _dirties_cat_T_22 = _dirties_cat_T_4 | _dirties_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}]
wire _dirties_cat_T_25 = _dirties_cat_T_23 | _dirties_cat_T_24; // @[Consts.scala:90:{32,42,49}]
wire _dirties_cat_T_27 = _dirties_cat_T_25 | _dirties_cat_T_26; // @[Consts.scala:90:{42,59,66}]
wire _dirties_cat_T_32 = _dirties_cat_T_28 | _dirties_cat_T_29; // @[package.scala:16:47, :81:59]
wire _dirties_cat_T_33 = _dirties_cat_T_32 | _dirties_cat_T_30; // @[package.scala:16:47, :81:59]
wire _dirties_cat_T_34 = _dirties_cat_T_33 | _dirties_cat_T_31; // @[package.scala:16:47, :81:59]
wire _dirties_cat_T_40 = _dirties_cat_T_35 | _dirties_cat_T_36; // @[package.scala:16:47, :81:59]
wire _dirties_cat_T_41 = _dirties_cat_T_40 | _dirties_cat_T_37; // @[package.scala:16:47, :81:59]
wire _dirties_cat_T_42 = _dirties_cat_T_41 | _dirties_cat_T_38; // @[package.scala:16:47, :81:59]
wire _dirties_cat_T_43 = _dirties_cat_T_42 | _dirties_cat_T_39; // @[package.scala:16:47, :81:59]
wire _dirties_cat_T_44 = _dirties_cat_T_34 | _dirties_cat_T_43; // @[package.scala:81:59]
wire _dirties_cat_T_45 = _dirties_cat_T_27 | _dirties_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}]
wire _dirties_cat_T_47 = _dirties_cat_T_45 | _dirties_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}]
wire _dirties_cat_T_49 = _dirties_cat_T_47 | _dirties_cat_T_48; // @[Consts.scala:91:{47,64,71}]
wire [1:0] dirties_cat = {_dirties_cat_T_22, _dirties_cat_T_49}; // @[Metadata.scala:29:18]
wire dirties = &dirties_cat; // @[Metadata.scala:29:18, :106:42]
wire [1:0] biggest_grow_param = dirties ? r2_2 : r1_2; // @[Misc.scala:35:36]
wire [1:0] dirtier_coh_state = biggest_grow_param; // @[Metadata.scala:107:33, :160:20]
wire [4:0] dirtier_cmd = dirties ? io_req_uop_mem_cmd_0 : req_uop_mem_cmd; // @[Metadata.scala:106:42, :109:27]
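  // TileLink D-channel beat counter for the refill Grant (Edges.scala): the beat count is
  // decoded from io_mem_grant_bits_size as (2^size)/8 - 1 for the 8-byte data bus,
  // r_counter tracks the remaining beats, refill_done pulses on the last accepted data
  // beat, and refill_address_inc is the byte offset of the current beat within the block.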
wire _T_16 = io_mem_grant_ready_0 & io_mem_grant_valid_0; // @[Decoupled.scala:51:35]
wire [26:0] _r_beats1_decode_T = 27'hFFF << io_mem_grant_bits_size_0; // @[package.scala:243:71]
wire [11:0] _r_beats1_decode_T_1 = _r_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _r_beats1_decode_T_2 = ~_r_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] r_beats1_decode = _r_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire r_beats1_opdata = io_mem_grant_bits_opcode_0[0]; // @[Edges.scala:106:36]
wire opdata = io_mem_grant_bits_opcode_0[0]; // @[Edges.scala:106:36]
wire grant_had_data_opdata = io_mem_grant_bits_opcode_0[0]; // @[Edges.scala:106:36]
wire [8:0] r_beats1 = r_beats1_opdata ? r_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] r_counter; // @[Edges.scala:229:27]
wire [9:0] _r_counter1_T = {1'h0, r_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] r_counter1 = _r_counter1_T[8:0]; // @[Edges.scala:230:28]
wire r_1_1 = r_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _r_last_T = r_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _r_last_T_1 = r_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire r_2 = _r_last_T | _r_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire refill_done = r_2 & _T_16; // @[Decoupled.scala:51:35]
wire [8:0] _r_count_T = ~r_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] r_4 = r_beats1 & _r_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _r_counter_T = r_1_1 ? r_beats1 : r_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [11:0] refill_address_inc = {r_4, 3'h0}; // @[Edges.scala:234:25, :269:29]
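  // sec_rdy (mshrs.scala:125): a secondary miss may merge into this MSHR only if it needs
  // no second acquire, is not a probe, and the MSHR is not in state 0 or in the late states
  // 5'hD/5'hE/5'hF (likely the meta-write / mem-finish states). Requests are pushed into the
  // replay queue only when accepted as a primary or secondary miss and only if they are not
  // prefetches (the 5'h2 / 5'h3 compares from Consts.scala:88).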
wire _sec_rdy_T = ~cmd_requires_second_acquire; // @[Metadata.scala:104:54]
wire _sec_rdy_T_1 = ~io_req_is_probe_0; // @[mshrs.scala:36:7, :125:50]
wire _sec_rdy_T_2 = _sec_rdy_T & _sec_rdy_T_1; // @[mshrs.scala:125:{18,47,50}]
wire _sec_rdy_T_3 = ~(|state); // @[package.scala:16:47]
wire _sec_rdy_T_4 = state == 5'hD; // @[package.scala:16:47]
wire _sec_rdy_T_5 = state == 5'hE; // @[package.scala:16:47]
wire _sec_rdy_T_6 = state == 5'hF; // @[package.scala:16:47]
wire _sec_rdy_T_7 = _sec_rdy_T_3 | _sec_rdy_T_4; // @[package.scala:16:47, :81:59]
wire _sec_rdy_T_8 = _sec_rdy_T_7 | _sec_rdy_T_5; // @[package.scala:16:47, :81:59]
wire _sec_rdy_T_9 = _sec_rdy_T_8 | _sec_rdy_T_6; // @[package.scala:16:47, :81:59]
wire _sec_rdy_T_10 = ~_sec_rdy_T_9; // @[package.scala:81:59]
wire sec_rdy = _sec_rdy_T_2 & _sec_rdy_T_10; // @[mshrs.scala:125:{47,67}, :126:18]
wire _rpq_io_enq_valid_T = io_req_pri_val_0 & io_req_pri_rdy_0; // @[mshrs.scala:36:7, :133:40]
wire _rpq_io_enq_valid_T_1 = io_req_sec_val_0 & io_req_sec_rdy_0; // @[mshrs.scala:36:7, :133:78]
wire _rpq_io_enq_valid_T_2 = _rpq_io_enq_valid_T | _rpq_io_enq_valid_T_1; // @[mshrs.scala:133:{40,59,78}]
wire _rpq_io_enq_valid_T_3 = io_req_uop_mem_cmd_0 == 5'h2; // @[Consts.scala:88:35]
wire _rpq_io_enq_valid_T_5 = _rpq_io_enq_valid_T_3 | _rpq_io_enq_valid_T_4; // @[Consts.scala:88:{35,45,52}]
wire _rpq_io_enq_valid_T_6 = ~_rpq_io_enq_valid_T_5; // @[Consts.scala:88:45]
wire _rpq_io_enq_valid_T_7 = _rpq_io_enq_valid_T_2 & _rpq_io_enq_valid_T_6; // @[mshrs.scala:133:{59,98,101}]
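  // Per-MSHR bookkeeping registers: grantack holds the pending GrantAck (its sink id drives
  // io_mem_finish_bits_sink), refill_ctr indexes the eight refill beats written into the
  // data array, commit_line / grant_had_data / finish_to_prefetch track line status, and
  // meta_hazard is a small counter that blocks probes for a few cycles after a metadata
  // update (io_probe_rdy below requires meta_hazard == 0).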
reg grantack_valid; // @[mshrs.scala:138:21]
reg [2:0] grantack_bits_sink; // @[mshrs.scala:138:21]
assign io_mem_finish_bits_sink_0 = grantack_bits_sink; // @[mshrs.scala:36:7, :138:21]
reg [2:0] refill_ctr; // @[mshrs.scala:139:24]
reg commit_line; // @[mshrs.scala:140:24]
reg grant_had_data; // @[mshrs.scala:141:27]
reg finish_to_prefetch; // @[mshrs.scala:142:31]
reg [1:0] meta_hazard; // @[mshrs.scala:145:28]
wire [2:0] _meta_hazard_T = {1'h0, meta_hazard} + 3'h1; // @[mshrs.scala:145:28, :146:59]
wire [1:0] _meta_hazard_T_1 = _meta_hazard_T[1:0]; // @[mshrs.scala:146:59]
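  // Externally visible status: io_probe_rdy is asserted when meta_hazard is clear and the
  // MSHR is in one of states 0..3, or in state 4 while a GrantAck is still outstanding;
  // idx/tag are valid whenever the MSHR is allocated (state != 0), and the way is held
  // except in state 0 or the terminal state 5'h11.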
wire _io_probe_rdy_T = meta_hazard == 2'h0; // @[mshrs.scala:145:28, :148:34]
wire _io_probe_rdy_T_1 = ~(|state); // @[package.scala:16:47]
wire _io_probe_rdy_T_2 = state == 5'h1; // @[package.scala:16:47]
wire _io_probe_rdy_T_3 = state == 5'h2; // @[package.scala:16:47]
wire _io_probe_rdy_T_4 = state == 5'h3; // @[package.scala:16:47]
wire _io_probe_rdy_T_5 = _io_probe_rdy_T_1 | _io_probe_rdy_T_2; // @[package.scala:16:47, :81:59]
wire _io_probe_rdy_T_6 = _io_probe_rdy_T_5 | _io_probe_rdy_T_3; // @[package.scala:16:47, :81:59]
wire _io_probe_rdy_T_7 = _io_probe_rdy_T_6 | _io_probe_rdy_T_4; // @[package.scala:16:47, :81:59]
wire _io_probe_rdy_T_8 = state == 5'h4; // @[mshrs.scala:107:22, :148:129]
wire _io_probe_rdy_T_9 = _io_probe_rdy_T_8 & grantack_valid; // @[mshrs.scala:138:21, :148:{129,145}]
wire _io_probe_rdy_T_10 = _io_probe_rdy_T_7 | _io_probe_rdy_T_9; // @[package.scala:81:59]
assign _io_probe_rdy_T_11 = _io_probe_rdy_T & _io_probe_rdy_T_10; // @[mshrs.scala:148:{34,42,119}]
assign io_probe_rdy_0 = _io_probe_rdy_T_11; // @[mshrs.scala:36:7, :148:42]
assign _io_idx_valid_T = |state; // @[package.scala:16:47]
assign io_idx_valid_0 = _io_idx_valid_T; // @[mshrs.scala:36:7, :149:25]
assign _io_tag_valid_T = |state; // @[package.scala:16:47]
assign io_tag_valid_0 = _io_tag_valid_T; // @[mshrs.scala:36:7, :150:25]
wire _io_way_valid_T = ~(|state); // @[package.scala:16:47]
wire _io_way_valid_T_1 = state == 5'h11; // @[package.scala:16:47]
wire _io_way_valid_T_2 = _io_way_valid_T | _io_way_valid_T_1; // @[package.scala:16:47, :81:59]
assign _io_way_valid_T_3 = ~_io_way_valid_T_2; // @[package.scala:81:59]
assign io_way_valid_0 = _io_way_valid_T_3; // @[mshrs.scala:36:7, :151:19]
assign io_meta_write_bits_tag_0 = req_tag[21:0]; // @[mshrs.scala:36:7, :111:26, :159:31]
assign io_meta_write_bits_data_tag_0 = req_tag[21:0]; // @[mshrs.scala:36:7, :111:26, :159:31]
assign io_meta_read_bits_tag_0 = req_tag[21:0]; // @[mshrs.scala:36:7, :111:26, :159:31]
assign _io_req_sec_rdy_T = sec_rdy & _rpq_io_enq_ready; // @[mshrs.scala:125:67, :128:19, :163:37]
assign io_req_sec_rdy_0 = _io_req_sec_rdy_T; // @[mshrs.scala:36:7, :163:37]
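  // A-channel Acquire formation: the block address is {req_tag, req_idx} << 6 (64-byte
  // blocks), shared below as rp_addr / io_replay_bits_addr; the legality terms appear to
  // check that the address falls in the 0x8000_0000-based memory region of the TileLink
  // edge (Parameters.scala:137), and a_param carries the grow permission computed from the
  // coherence lookup.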
wire [27:0] _GEN_27 = {req_tag, req_idx}; // @[mshrs.scala:110:25, :111:26, :168:26]
wire [27:0] _io_mem_acquire_bits_T; // @[mshrs.scala:168:26]
assign _io_mem_acquire_bits_T = _GEN_27; // @[mshrs.scala:168:26]
wire [27:0] rp_addr_hi; // @[mshrs.scala:271:22]
assign rp_addr_hi = _GEN_27; // @[mshrs.scala:168:26, :271:22]
wire [27:0] hi; // @[mshrs.scala:276:10]
assign hi = _GEN_27; // @[mshrs.scala:168:26, :276:10]
wire [27:0] io_replay_bits_addr_hi; // @[mshrs.scala:341:31]
assign io_replay_bits_addr_hi = _GEN_27; // @[mshrs.scala:168:26, :341:31]
wire [33:0] _io_mem_acquire_bits_T_1 = {_io_mem_acquire_bits_T, 6'h0}; // @[mshrs.scala:168:{26,45}]
wire [33:0] _io_mem_acquire_bits_legal_T_1 = _io_mem_acquire_bits_T_1; // @[Parameters.scala:137:31]
wire [34:0] _io_mem_acquire_bits_legal_T_2 = {1'h0, _io_mem_acquire_bits_legal_T_1}; // @[Parameters.scala:137:{31,41}]
wire [34:0] _io_mem_acquire_bits_legal_T_3 = _io_mem_acquire_bits_legal_T_2 & 35'h80000000; // @[Parameters.scala:137:{41,46}]
wire [34:0] _io_mem_acquire_bits_legal_T_4 = _io_mem_acquire_bits_legal_T_3; // @[Parameters.scala:137:46]
wire _io_mem_acquire_bits_legal_T_5 = _io_mem_acquire_bits_legal_T_4 == 35'h0; // @[Parameters.scala:137:{46,59}]
assign io_mem_acquire_bits_a_address = _io_mem_acquire_bits_T_1[31:0]; // @[Edges.scala:346:17]
wire [33:0] _io_mem_acquire_bits_legal_T_9 = {_io_mem_acquire_bits_T_1[33:32], io_mem_acquire_bits_a_address ^ 32'h80000000}; // @[Edges.scala:346:17]
wire [34:0] _io_mem_acquire_bits_legal_T_10 = {1'h0, _io_mem_acquire_bits_legal_T_9}; // @[Parameters.scala:137:{31,41}]
wire [34:0] _io_mem_acquire_bits_legal_T_11 = _io_mem_acquire_bits_legal_T_10 & 35'h80000000; // @[Parameters.scala:137:{41,46}]
wire [34:0] _io_mem_acquire_bits_legal_T_12 = _io_mem_acquire_bits_legal_T_11; // @[Parameters.scala:137:46]
wire _io_mem_acquire_bits_legal_T_13 = _io_mem_acquire_bits_legal_T_12 == 35'h0; // @[Parameters.scala:137:{46,59}]
wire _io_mem_acquire_bits_legal_T_14 = _io_mem_acquire_bits_legal_T_13; // @[Parameters.scala:684:54]
wire io_mem_acquire_bits_legal = _io_mem_acquire_bits_legal_T_14; // @[Parameters.scala:684:54, :686:26]
assign io_mem_acquire_bits_param_0 = io_mem_acquire_bits_a_param; // @[Edges.scala:346:17]
assign io_mem_acquire_bits_address_0 = io_mem_acquire_bits_a_address; // @[Edges.scala:346:17]
assign io_mem_acquire_bits_a_param = {1'h0, grow_param}; // @[Misc.scala:35:36]
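  // Byte-lane mask helper for the Acquire (Misc.scala:210-215): a decode tree over address
  // bits [2:0] of the block address that produces the per-byte mask terms for the A channel.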
wire io_mem_acquire_bits_a_mask_sub_sub_bit = _io_mem_acquire_bits_T_1[2]; // @[Misc.scala:210:26]
wire io_mem_acquire_bits_a_mask_sub_sub_1_2 = io_mem_acquire_bits_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire io_mem_acquire_bits_a_mask_sub_sub_nbit = ~io_mem_acquire_bits_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire io_mem_acquire_bits_a_mask_sub_sub_0_2 = io_mem_acquire_bits_a_mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _io_mem_acquire_bits_a_mask_sub_sub_acc_T = io_mem_acquire_bits_a_mask_sub_sub_0_2; // @[Misc.scala:214:27, :215:38]
wire _io_mem_acquire_bits_a_mask_sub_sub_acc_T_1 = io_mem_acquire_bits_a_mask_sub_sub_1_2; // @[Misc.scala:214:27, :215:38]
wire io_mem_acquire_bits_a_mask_sub_bit = _io_mem_acquire_bits_T_1[1]; // @[Misc.scala:210:26]
wire io_mem_acquire_bits_a_mask_sub_nbit = ~io_mem_acquire_bits_a_mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire io_mem_acquire_bits_a_mask_sub_0_2 = io_mem_acquire_bits_a_mask_sub_sub_0_2 & io_mem_acquire_bits_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire io_mem_acquire_bits_a_mask_sub_1_2 = io_mem_acquire_bits_a_mask_sub_sub_0_2 & io_mem_acquire_bits_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire io_mem_acquire_bits_a_mask_sub_2_2 = io_mem_acquire_bits_a_mask_sub_sub_1_2 & io_mem_acquire_bits_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire io_mem_acquire_bits_a_mask_sub_3_2 = io_mem_acquire_bits_a_mask_sub_sub_1_2 & io_mem_acquire_bits_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire io_mem_acquire_bits_a_mask_bit = _io_mem_acquire_bits_T_1[0]; // @[Misc.scala:210:26]
wire io_mem_acquire_bits_a_mask_nbit = ~io_mem_acquire_bits_a_mask_bit; // @[Misc.scala:210:26, :211:20]
wire io_mem_acquire_bits_a_mask_eq = io_mem_acquire_bits_a_mask_sub_0_2 & io_mem_acquire_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _io_mem_acquire_bits_a_mask_acc_T = io_mem_acquire_bits_a_mask_eq; // @[Misc.scala:214:27, :215:38]
wire io_mem_acquire_bits_a_mask_eq_1 = io_mem_acquire_bits_a_mask_sub_0_2 & io_mem_acquire_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _io_mem_acquire_bits_a_mask_acc_T_1 = io_mem_acquire_bits_a_mask_eq_1; // @[Misc.scala:214:27, :215:38]
wire io_mem_acquire_bits_a_mask_eq_2 = io_mem_acquire_bits_a_mask_sub_1_2 & io_mem_acquire_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _io_mem_acquire_bits_a_mask_acc_T_2 = io_mem_acquire_bits_a_mask_eq_2; // @[Misc.scala:214:27, :215:38]
wire io_mem_acquire_bits_a_mask_eq_3 = io_mem_acquire_bits_a_mask_sub_1_2 & io_mem_acquire_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _io_mem_acquire_bits_a_mask_acc_T_3 = io_mem_acquire_bits_a_mask_eq_3; // @[Misc.scala:214:27, :215:38]
wire io_mem_acquire_bits_a_mask_eq_4 = io_mem_acquire_bits_a_mask_sub_2_2 & io_mem_acquire_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _io_mem_acquire_bits_a_mask_acc_T_4 = io_mem_acquire_bits_a_mask_eq_4; // @[Misc.scala:214:27, :215:38]
wire io_mem_acquire_bits_a_mask_eq_5 = io_mem_acquire_bits_a_mask_sub_2_2 & io_mem_acquire_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _io_mem_acquire_bits_a_mask_acc_T_5 = io_mem_acquire_bits_a_mask_eq_5; // @[Misc.scala:214:27, :215:38]
wire io_mem_acquire_bits_a_mask_eq_6 = io_mem_acquire_bits_a_mask_sub_3_2 & io_mem_acquire_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _io_mem_acquire_bits_a_mask_acc_T_6 = io_mem_acquire_bits_a_mask_eq_6; // @[Misc.scala:214:27, :215:38]
wire io_mem_acquire_bits_a_mask_eq_7 = io_mem_acquire_bits_a_mask_sub_3_2 & io_mem_acquire_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _io_mem_acquire_bits_a_mask_acc_T_7 = io_mem_acquire_bits_a_mask_eq_7; // @[Misc.scala:214:27, :215:38]
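  // Refill and line-buffer addressing: io_refill_bits_addr is the block address plus
  // refill_ctr * 8 (the beat currently being written into the data array), the line-buffer
  // write offset comes from refill_address_inc (the beat of the incoming Grant), and the
  // line-buffer read offset is taken from the replay-queue head's address below.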
wire [5:0] _io_refill_bits_addr_T = {refill_ctr, 3'h0}; // @[mshrs.scala:139:24, :172:57]
wire [33:0] _io_refill_bits_addr_T_1 = {req_block_addr[33:6], req_block_addr[5:0] | _io_refill_bits_addr_T}; // @[mshrs.scala:112:51, :172:{43,57}]
assign io_refill_bits_addr_0 = _io_refill_bits_addr_T_1[9:0]; // @[mshrs.scala:36:7, :172:{25,43}]
wire [8:0] _io_lb_write_bits_offset_T = refill_address_inc[11:3]; // @[Edges.scala:269:29]
assign io_lb_write_bits_offset_0 = _io_lb_write_bits_offset_T[2:0]; // @[mshrs.scala:36:7, :197:{27,49}]
wire [30:0] _io_lb_read_offset_T = _rpq_io_deq_bits_addr[33:3]; // @[mshrs.scala:128:19, :200:45]
wire [30:0] _io_lb_read_offset_T_1 = _rpq_io_deq_bits_addr[33:3]; // @[mshrs.scala:128:19, :200:45, :282:47]
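  // Victim writeback decision for a freshly allocated miss (mshrs.scala:210-213): the
  // lookup below appears to be the ClientMetadata shrink-to-Nothing decode over
  // {2'h2, io_req_old_meta_coh_state} (Metadata.scala:120), yielding whether the evicted
  // line must be written back with data (state_req_needs_wb_r_1), the Release shrink/report
  // param (state_req_needs_wb_r_2) and the metadata state left behind
  // (state_req_needs_wb_r_3, aliased to state_req_needs_wb_meta_state).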
wire [4:0] state_new_state; // @[mshrs.scala:210:29]
wire _state_T_1 = ~_state_T; // @[mshrs.scala:213:11]
wire _state_T_2 = ~_rpq_io_enq_ready; // @[mshrs.scala:128:19, :213:11]
wire [3:0] _GEN_28 = {2'h2, io_req_old_meta_coh_state_0}; // @[Metadata.scala:120:19]
wire [3:0] _state_req_needs_wb_r_T_6; // @[Metadata.scala:120:19]
assign _state_req_needs_wb_r_T_6 = _GEN_28; // @[Metadata.scala:120:19]
wire [3:0] _state_req_needs_wb_r_T_70; // @[Metadata.scala:120:19]
assign _state_req_needs_wb_r_T_70 = _GEN_28; // @[Metadata.scala:120:19]
wire _state_req_needs_wb_r_T_19 = _state_req_needs_wb_r_T_6 == 4'h8; // @[Misc.scala:56:20]
wire [2:0] _state_req_needs_wb_r_T_21 = _state_req_needs_wb_r_T_19 ? 3'h5 : 3'h0; // @[Misc.scala:38:36, :56:20]
wire _state_req_needs_wb_r_T_23 = _state_req_needs_wb_r_T_6 == 4'h9; // @[Misc.scala:56:20]
wire [2:0] _state_req_needs_wb_r_T_25 = _state_req_needs_wb_r_T_23 ? 3'h2 : _state_req_needs_wb_r_T_21; // @[Misc.scala:38:36, :56:20]
wire _state_req_needs_wb_r_T_27 = _state_req_needs_wb_r_T_6 == 4'hA; // @[Misc.scala:56:20]
wire [2:0] _state_req_needs_wb_r_T_29 = _state_req_needs_wb_r_T_27 ? 3'h1 : _state_req_needs_wb_r_T_25; // @[Misc.scala:38:36, :56:20]
wire _state_req_needs_wb_r_T_31 = _state_req_needs_wb_r_T_6 == 4'hB; // @[Misc.scala:56:20]
wire _state_req_needs_wb_r_T_32 = _state_req_needs_wb_r_T_31; // @[Misc.scala:38:9, :56:20]
wire [2:0] _state_req_needs_wb_r_T_33 = _state_req_needs_wb_r_T_31 ? 3'h1 : _state_req_needs_wb_r_T_29; // @[Misc.scala:38:36, :56:20]
wire _state_req_needs_wb_r_T_35 = _state_req_needs_wb_r_T_6 == 4'h4; // @[Misc.scala:56:20]
wire _state_req_needs_wb_r_T_36 = ~_state_req_needs_wb_r_T_35 & _state_req_needs_wb_r_T_32; // @[Misc.scala:38:9, :56:20]
wire [2:0] _state_req_needs_wb_r_T_37 = _state_req_needs_wb_r_T_35 ? 3'h5 : _state_req_needs_wb_r_T_33; // @[Misc.scala:38:36, :56:20]
wire _state_req_needs_wb_r_T_39 = _state_req_needs_wb_r_T_6 == 4'h5; // @[Misc.scala:56:20]
wire _state_req_needs_wb_r_T_40 = ~_state_req_needs_wb_r_T_39 & _state_req_needs_wb_r_T_36; // @[Misc.scala:38:9, :56:20]
wire [2:0] _state_req_needs_wb_r_T_41 = _state_req_needs_wb_r_T_39 ? 3'h4 : _state_req_needs_wb_r_T_37; // @[Misc.scala:38:36, :56:20]
wire [1:0] _state_req_needs_wb_r_T_42 = {1'h0, _state_req_needs_wb_r_T_39}; // @[Misc.scala:38:63, :56:20]
wire _state_req_needs_wb_r_T_43 = _state_req_needs_wb_r_T_6 == 4'h6; // @[Misc.scala:56:20]
wire _state_req_needs_wb_r_T_44 = ~_state_req_needs_wb_r_T_43 & _state_req_needs_wb_r_T_40; // @[Misc.scala:38:9, :56:20]
wire [2:0] _state_req_needs_wb_r_T_45 = _state_req_needs_wb_r_T_43 ? 3'h0 : _state_req_needs_wb_r_T_41; // @[Misc.scala:38:36, :56:20]
wire [1:0] _state_req_needs_wb_r_T_46 = _state_req_needs_wb_r_T_43 ? 2'h1 : _state_req_needs_wb_r_T_42; // @[Misc.scala:38:63, :56:20]
wire _state_req_needs_wb_r_T_47 = _state_req_needs_wb_r_T_6 == 4'h7; // @[Misc.scala:56:20]
wire _state_req_needs_wb_r_T_48 = _state_req_needs_wb_r_T_47 | _state_req_needs_wb_r_T_44; // @[Misc.scala:38:9, :56:20]
wire [2:0] _state_req_needs_wb_r_T_49 = _state_req_needs_wb_r_T_47 ? 3'h0 : _state_req_needs_wb_r_T_45; // @[Misc.scala:38:36, :56:20]
wire [1:0] _state_req_needs_wb_r_T_50 = _state_req_needs_wb_r_T_47 ? 2'h1 : _state_req_needs_wb_r_T_46; // @[Misc.scala:38:63, :56:20]
wire _state_req_needs_wb_r_T_51 = _state_req_needs_wb_r_T_6 == 4'h0; // @[Misc.scala:56:20]
wire _state_req_needs_wb_r_T_52 = ~_state_req_needs_wb_r_T_51 & _state_req_needs_wb_r_T_48; // @[Misc.scala:38:9, :56:20]
wire [2:0] _state_req_needs_wb_r_T_53 = _state_req_needs_wb_r_T_51 ? 3'h5 : _state_req_needs_wb_r_T_49; // @[Misc.scala:38:36, :56:20]
wire [1:0] _state_req_needs_wb_r_T_54 = _state_req_needs_wb_r_T_51 ? 2'h0 : _state_req_needs_wb_r_T_50; // @[Misc.scala:38:63, :56:20]
wire _state_req_needs_wb_r_T_55 = _state_req_needs_wb_r_T_6 == 4'h1; // @[Misc.scala:56:20]
wire _state_req_needs_wb_r_T_56 = ~_state_req_needs_wb_r_T_55 & _state_req_needs_wb_r_T_52; // @[Misc.scala:38:9, :56:20]
wire [2:0] _state_req_needs_wb_r_T_57 = _state_req_needs_wb_r_T_55 ? 3'h4 : _state_req_needs_wb_r_T_53; // @[Misc.scala:38:36, :56:20]
wire [1:0] _state_req_needs_wb_r_T_58 = _state_req_needs_wb_r_T_55 ? 2'h1 : _state_req_needs_wb_r_T_54; // @[Misc.scala:38:63, :56:20]
wire _state_req_needs_wb_r_T_59 = _state_req_needs_wb_r_T_6 == 4'h2; // @[Misc.scala:56:20]
wire _state_req_needs_wb_r_T_60 = ~_state_req_needs_wb_r_T_59 & _state_req_needs_wb_r_T_56; // @[Misc.scala:38:9, :56:20]
wire [2:0] _state_req_needs_wb_r_T_61 = _state_req_needs_wb_r_T_59 ? 3'h3 : _state_req_needs_wb_r_T_57; // @[Misc.scala:38:36, :56:20]
wire [1:0] _state_req_needs_wb_r_T_62 = _state_req_needs_wb_r_T_59 ? 2'h2 : _state_req_needs_wb_r_T_58; // @[Misc.scala:38:63, :56:20]
wire _state_req_needs_wb_r_T_63 = _state_req_needs_wb_r_T_6 == 4'h3; // @[Misc.scala:56:20]
wire state_req_needs_wb_r_1 = _state_req_needs_wb_r_T_63 | _state_req_needs_wb_r_T_60; // @[Misc.scala:38:9, :56:20]
wire [2:0] state_req_needs_wb_r_2 = _state_req_needs_wb_r_T_63 ? 3'h3 : _state_req_needs_wb_r_T_61; // @[Misc.scala:38:36, :56:20]
wire [1:0] state_req_needs_wb_r_3 = _state_req_needs_wb_r_T_63 ? 2'h2 : _state_req_needs_wb_r_T_62; // @[Misc.scala:38:63, :56:20]
wire [1:0] state_req_needs_wb_meta_state = state_req_needs_wb_r_3; // @[Misc.scala:38:63]
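  // onAccess of the old (tag-matched) metadata with the incoming command: state_is_hit says
  // the request would already hit with the existing permissions and state_coh_on_hit_state
  // is the coherence state after such a hit; combined with io_req_tag_match (and the
  // isWrite reduction in _state_T_25) this selects the allocation state: 5'hC on a
  // tag-match hit, otherwise 5'h1, where the Acquire is issued.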
wire _state_r_c_cat_T_2 = _state_r_c_cat_T | _state_r_c_cat_T_1; // @[Consts.scala:90:{32,42,49}]
wire _state_r_c_cat_T_4 = _state_r_c_cat_T_2 | _state_r_c_cat_T_3; // @[Consts.scala:90:{42,59,66}]
wire _state_r_c_cat_T_9 = _state_r_c_cat_T_5 | _state_r_c_cat_T_6; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_10 = _state_r_c_cat_T_9 | _state_r_c_cat_T_7; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_11 = _state_r_c_cat_T_10 | _state_r_c_cat_T_8; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_17 = _state_r_c_cat_T_12 | _state_r_c_cat_T_13; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_18 = _state_r_c_cat_T_17 | _state_r_c_cat_T_14; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_19 = _state_r_c_cat_T_18 | _state_r_c_cat_T_15; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_20 = _state_r_c_cat_T_19 | _state_r_c_cat_T_16; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_21 = _state_r_c_cat_T_11 | _state_r_c_cat_T_20; // @[package.scala:81:59]
wire _state_r_c_cat_T_22 = _state_r_c_cat_T_4 | _state_r_c_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}]
wire _state_r_c_cat_T_25 = _state_r_c_cat_T_23 | _state_r_c_cat_T_24; // @[Consts.scala:90:{32,42,49}]
wire _state_r_c_cat_T_27 = _state_r_c_cat_T_25 | _state_r_c_cat_T_26; // @[Consts.scala:90:{42,59,66}]
wire _state_r_c_cat_T_32 = _state_r_c_cat_T_28 | _state_r_c_cat_T_29; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_33 = _state_r_c_cat_T_32 | _state_r_c_cat_T_30; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_34 = _state_r_c_cat_T_33 | _state_r_c_cat_T_31; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_40 = _state_r_c_cat_T_35 | _state_r_c_cat_T_36; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_41 = _state_r_c_cat_T_40 | _state_r_c_cat_T_37; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_42 = _state_r_c_cat_T_41 | _state_r_c_cat_T_38; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_43 = _state_r_c_cat_T_42 | _state_r_c_cat_T_39; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_44 = _state_r_c_cat_T_34 | _state_r_c_cat_T_43; // @[package.scala:81:59]
wire _state_r_c_cat_T_45 = _state_r_c_cat_T_27 | _state_r_c_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}]
wire _state_r_c_cat_T_47 = _state_r_c_cat_T_45 | _state_r_c_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}]
wire _state_r_c_cat_T_49 = _state_r_c_cat_T_47 | _state_r_c_cat_T_48; // @[Consts.scala:91:{47,64,71}]
wire [1:0] state_r_c = {_state_r_c_cat_T_22, _state_r_c_cat_T_49}; // @[Metadata.scala:29:18]
wire [3:0] _state_r_T = {state_r_c, io_req_old_meta_coh_state_0}; // @[Metadata.scala:29:18, :58:19]
wire _state_r_T_25 = _state_r_T == 4'hC; // @[Misc.scala:49:20]
wire [1:0] _state_r_T_27 = {1'h0, _state_r_T_25}; // @[Misc.scala:35:36, :49:20]
wire _state_r_T_28 = _state_r_T == 4'hD; // @[Misc.scala:49:20]
wire [1:0] _state_r_T_30 = _state_r_T_28 ? 2'h2 : _state_r_T_27; // @[Misc.scala:35:36, :49:20]
wire _state_r_T_31 = _state_r_T == 4'h4; // @[Misc.scala:49:20]
wire [1:0] _state_r_T_33 = _state_r_T_31 ? 2'h1 : _state_r_T_30; // @[Misc.scala:35:36, :49:20]
wire _state_r_T_34 = _state_r_T == 4'h5; // @[Misc.scala:49:20]
wire [1:0] _state_r_T_36 = _state_r_T_34 ? 2'h2 : _state_r_T_33; // @[Misc.scala:35:36, :49:20]
wire _state_r_T_37 = _state_r_T == 4'h0; // @[Misc.scala:49:20]
wire [1:0] _state_r_T_39 = _state_r_T_37 ? 2'h0 : _state_r_T_36; // @[Misc.scala:35:36, :49:20]
wire _state_r_T_40 = _state_r_T == 4'hE; // @[Misc.scala:49:20]
wire _state_r_T_41 = _state_r_T_40; // @[Misc.scala:35:9, :49:20]
wire [1:0] _state_r_T_42 = _state_r_T_40 ? 2'h3 : _state_r_T_39; // @[Misc.scala:35:36, :49:20]
wire _state_r_T_43 = &_state_r_T; // @[Misc.scala:49:20]
wire _state_r_T_44 = _state_r_T_43 | _state_r_T_41; // @[Misc.scala:35:9, :49:20]
wire [1:0] _state_r_T_45 = _state_r_T_43 ? 2'h3 : _state_r_T_42; // @[Misc.scala:35:36, :49:20]
wire _state_r_T_46 = _state_r_T == 4'h6; // @[Misc.scala:49:20]
wire _state_r_T_47 = _state_r_T_46 | _state_r_T_44; // @[Misc.scala:35:9, :49:20]
wire [1:0] _state_r_T_48 = _state_r_T_46 ? 2'h2 : _state_r_T_45; // @[Misc.scala:35:36, :49:20]
wire _state_r_T_49 = _state_r_T == 4'h7; // @[Misc.scala:49:20]
wire _state_r_T_50 = _state_r_T_49 | _state_r_T_47; // @[Misc.scala:35:9, :49:20]
wire [1:0] _state_r_T_51 = _state_r_T_49 ? 2'h3 : _state_r_T_48; // @[Misc.scala:35:36, :49:20]
wire _state_r_T_52 = _state_r_T == 4'h1; // @[Misc.scala:49:20]
wire _state_r_T_53 = _state_r_T_52 | _state_r_T_50; // @[Misc.scala:35:9, :49:20]
wire [1:0] _state_r_T_54 = _state_r_T_52 ? 2'h1 : _state_r_T_51; // @[Misc.scala:35:36, :49:20]
wire _state_r_T_55 = _state_r_T == 4'h2; // @[Misc.scala:49:20]
wire _state_r_T_56 = _state_r_T_55 | _state_r_T_53; // @[Misc.scala:35:9, :49:20]
wire [1:0] _state_r_T_57 = _state_r_T_55 ? 2'h2 : _state_r_T_54; // @[Misc.scala:35:36, :49:20]
wire _state_r_T_58 = _state_r_T == 4'h3; // @[Misc.scala:49:20]
wire state_is_hit = _state_r_T_58 | _state_r_T_56; // @[Misc.scala:35:9, :49:20]
wire [1:0] state_r_2 = _state_r_T_58 ? 2'h3 : _state_r_T_57; // @[Misc.scala:35:36, :49:20]
wire [1:0] state_coh_on_hit_state = state_r_2; // @[Misc.scala:35:36]
wire _state_T_5 = _state_T_3 | _state_T_4; // @[Consts.scala:90:{32,42,49}]
wire _state_T_7 = _state_T_5 | _state_T_6; // @[Consts.scala:90:{42,59,66}]
wire _state_T_12 = _state_T_8 | _state_T_9; // @[package.scala:16:47, :81:59]
wire _state_T_13 = _state_T_12 | _state_T_10; // @[package.scala:16:47, :81:59]
wire _state_T_14 = _state_T_13 | _state_T_11; // @[package.scala:16:47, :81:59]
wire _state_T_20 = _state_T_15 | _state_T_16; // @[package.scala:16:47, :81:59]
wire _state_T_21 = _state_T_20 | _state_T_17; // @[package.scala:16:47, :81:59]
wire _state_T_22 = _state_T_21 | _state_T_18; // @[package.scala:16:47, :81:59]
wire _state_T_23 = _state_T_22 | _state_T_19; // @[package.scala:16:47, :81:59]
wire _state_T_24 = _state_T_14 | _state_T_23; // @[package.scala:81:59]
wire _state_T_25 = _state_T_7 | _state_T_24; // @[Consts.scala:87:44, :90:{59,76}]
wire _state_T_27 = ~_state_T_26; // @[mshrs.scala:220:15]
wire _state_T_28 = ~_state_T_25; // @[Consts.scala:90:76]
assign state_new_state = io_req_tag_match_0 & state_is_hit ? 5'hC : 5'h1; // @[Misc.scala:35:9]
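  // Memory-side handshake: the Acquire is driven while state == 5'h1; in state 5'h2 the
  // MSHR accepts Grant beats (io_mem_grant_ready), forwards data beats into the line buffer
  // (io_lb_write_valid, gated on the opcode's data bit), and latches a pending GrantAck for
  // Grant/GrantData opcodes (Edges.scala:71). After the refill completes it drains loads
  // (5'h3) if the grant carried data, otherwise it goes straight to 5'hC.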
assign io_mem_acquire_valid_0 = (|state) & _io_probe_rdy_T_2; // @[package.scala:16:47]
wire _GEN_29 = ~(|state) | _io_probe_rdy_T_2; // @[package.scala:16:47]
assign io_lb_write_valid_0 = ~_GEN_29 & _io_probe_rdy_T_3 & opdata & io_mem_grant_valid_0; // @[package.scala:16:47]
assign io_mem_grant_ready_0 = ~_GEN_29 & _io_probe_rdy_T_3; // @[package.scala:16:47]
wire _grantack_valid_T = io_mem_grant_bits_opcode_0[2]; // @[Edges.scala:71:36]
wire _grantack_valid_T_1 = io_mem_grant_bits_opcode_0[1]; // @[Edges.scala:71:52]
wire _grantack_valid_T_2 = ~_grantack_valid_T_1; // @[Edges.scala:71:{43,52}]
wire _grantack_valid_T_3 = _grantack_valid_T & _grantack_valid_T_2; // @[Edges.scala:71:{36,40,43}]
wire [4:0] _state_T_29 = grant_had_data ? 5'h3 : 5'hC; // @[mshrs.scala:141:27, :260:19]
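  // drain_load (mshrs.scala:267-269): the replay-queue head can be serviced directly from
  // the line buffer while the line is still being committed if it is a read (the isRead
  // family of compares on _rpq_io_deq_bits_uop_mem_cmd), is not also a write, and is not an
  // LR (5'h6).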
wire _drain_load_T = _rpq_io_deq_bits_uop_mem_cmd == 5'h0; // @[package.scala:16:47]
wire _drain_load_T_1 = _rpq_io_deq_bits_uop_mem_cmd == 5'h10; // @[package.scala:16:47]
wire _GEN_30 = _rpq_io_deq_bits_uop_mem_cmd == 5'h6; // @[package.scala:16:47]
wire _drain_load_T_2; // @[package.scala:16:47]
assign _drain_load_T_2 = _GEN_30; // @[package.scala:16:47]
wire _r_c_cat_T_48; // @[Consts.scala:91:71]
assign _r_c_cat_T_48 = _GEN_30; // @[package.scala:16:47]
wire _GEN_31 = _rpq_io_deq_bits_uop_mem_cmd == 5'h7; // @[package.scala:16:47]
wire _drain_load_T_3; // @[package.scala:16:47]
assign _drain_load_T_3 = _GEN_31; // @[package.scala:16:47]
wire _drain_load_T_28; // @[Consts.scala:90:66]
assign _drain_load_T_28 = _GEN_31; // @[package.scala:16:47]
wire _drain_load_T_4 = _drain_load_T | _drain_load_T_1; // @[package.scala:16:47, :81:59]
wire _drain_load_T_5 = _drain_load_T_4 | _drain_load_T_2; // @[package.scala:16:47, :81:59]
wire _drain_load_T_6 = _drain_load_T_5 | _drain_load_T_3; // @[package.scala:16:47, :81:59]
wire _GEN_32 = _rpq_io_deq_bits_uop_mem_cmd == 5'h4; // @[package.scala:16:47]
wire _drain_load_T_7; // @[package.scala:16:47]
assign _drain_load_T_7 = _GEN_32; // @[package.scala:16:47]
wire _drain_load_T_30; // @[package.scala:16:47]
assign _drain_load_T_30 = _GEN_32; // @[package.scala:16:47]
wire _GEN_33 = _rpq_io_deq_bits_uop_mem_cmd == 5'h9; // @[package.scala:16:47]
wire _drain_load_T_8; // @[package.scala:16:47]
assign _drain_load_T_8 = _GEN_33; // @[package.scala:16:47]
wire _drain_load_T_31; // @[package.scala:16:47]
assign _drain_load_T_31 = _GEN_33; // @[package.scala:16:47]
wire _GEN_34 = _rpq_io_deq_bits_uop_mem_cmd == 5'hA; // @[package.scala:16:47]
wire _drain_load_T_9; // @[package.scala:16:47]
assign _drain_load_T_9 = _GEN_34; // @[package.scala:16:47]
wire _drain_load_T_32; // @[package.scala:16:47]
assign _drain_load_T_32 = _GEN_34; // @[package.scala:16:47]
wire _GEN_35 = _rpq_io_deq_bits_uop_mem_cmd == 5'hB; // @[package.scala:16:47]
wire _drain_load_T_10; // @[package.scala:16:47]
assign _drain_load_T_10 = _GEN_35; // @[package.scala:16:47]
wire _drain_load_T_33; // @[package.scala:16:47]
assign _drain_load_T_33 = _GEN_35; // @[package.scala:16:47]
wire _drain_load_T_11 = _drain_load_T_7 | _drain_load_T_8; // @[package.scala:16:47, :81:59]
wire _drain_load_T_12 = _drain_load_T_11 | _drain_load_T_9; // @[package.scala:16:47, :81:59]
wire _drain_load_T_13 = _drain_load_T_12 | _drain_load_T_10; // @[package.scala:16:47, :81:59]
wire _GEN_36 = _rpq_io_deq_bits_uop_mem_cmd == 5'h8; // @[package.scala:16:47]
wire _drain_load_T_14; // @[package.scala:16:47]
assign _drain_load_T_14 = _GEN_36; // @[package.scala:16:47]
wire _drain_load_T_37; // @[package.scala:16:47]
assign _drain_load_T_37 = _GEN_36; // @[package.scala:16:47]
wire _GEN_37 = _rpq_io_deq_bits_uop_mem_cmd == 5'hC; // @[package.scala:16:47]
wire _drain_load_T_15; // @[package.scala:16:47]
assign _drain_load_T_15 = _GEN_37; // @[package.scala:16:47]
wire _drain_load_T_38; // @[package.scala:16:47]
assign _drain_load_T_38 = _GEN_37; // @[package.scala:16:47]
wire _GEN_38 = _rpq_io_deq_bits_uop_mem_cmd == 5'hD; // @[package.scala:16:47]
wire _drain_load_T_16; // @[package.scala:16:47]
assign _drain_load_T_16 = _GEN_38; // @[package.scala:16:47]
wire _drain_load_T_39; // @[package.scala:16:47]
assign _drain_load_T_39 = _GEN_38; // @[package.scala:16:47]
wire _GEN_39 = _rpq_io_deq_bits_uop_mem_cmd == 5'hE; // @[package.scala:16:47]
wire _drain_load_T_17; // @[package.scala:16:47]
assign _drain_load_T_17 = _GEN_39; // @[package.scala:16:47]
wire _drain_load_T_40; // @[package.scala:16:47]
assign _drain_load_T_40 = _GEN_39; // @[package.scala:16:47]
wire _GEN_40 = _rpq_io_deq_bits_uop_mem_cmd == 5'hF; // @[package.scala:16:47]
wire _drain_load_T_18; // @[package.scala:16:47]
assign _drain_load_T_18 = _GEN_40; // @[package.scala:16:47]
wire _drain_load_T_41; // @[package.scala:16:47]
assign _drain_load_T_41 = _GEN_40; // @[package.scala:16:47]
wire _drain_load_T_19 = _drain_load_T_14 | _drain_load_T_15; // @[package.scala:16:47, :81:59]
wire _drain_load_T_20 = _drain_load_T_19 | _drain_load_T_16; // @[package.scala:16:47, :81:59]
wire _drain_load_T_21 = _drain_load_T_20 | _drain_load_T_17; // @[package.scala:16:47, :81:59]
wire _drain_load_T_22 = _drain_load_T_21 | _drain_load_T_18; // @[package.scala:16:47, :81:59]
wire _drain_load_T_23 = _drain_load_T_13 | _drain_load_T_22; // @[package.scala:81:59]
wire _drain_load_T_24 = _drain_load_T_6 | _drain_load_T_23; // @[package.scala:81:59]
wire _drain_load_T_25 = _rpq_io_deq_bits_uop_mem_cmd == 5'h1; // @[Consts.scala:90:32]
wire _drain_load_T_26 = _rpq_io_deq_bits_uop_mem_cmd == 5'h11; // @[Consts.scala:90:49]
wire _drain_load_T_27 = _drain_load_T_25 | _drain_load_T_26; // @[Consts.scala:90:{32,42,49}]
wire _drain_load_T_29 = _drain_load_T_27 | _drain_load_T_28; // @[Consts.scala:90:{42,59,66}]
wire _drain_load_T_34 = _drain_load_T_30 | _drain_load_T_31; // @[package.scala:16:47, :81:59]
wire _drain_load_T_35 = _drain_load_T_34 | _drain_load_T_32; // @[package.scala:16:47, :81:59]
wire _drain_load_T_36 = _drain_load_T_35 | _drain_load_T_33; // @[package.scala:16:47, :81:59]
wire _drain_load_T_42 = _drain_load_T_37 | _drain_load_T_38; // @[package.scala:16:47, :81:59]
wire _drain_load_T_43 = _drain_load_T_42 | _drain_load_T_39; // @[package.scala:16:47, :81:59]
wire _drain_load_T_44 = _drain_load_T_43 | _drain_load_T_40; // @[package.scala:16:47, :81:59]
wire _drain_load_T_45 = _drain_load_T_44 | _drain_load_T_41; // @[package.scala:16:47, :81:59]
wire _drain_load_T_46 = _drain_load_T_36 | _drain_load_T_45; // @[package.scala:81:59]
wire _drain_load_T_47 = _drain_load_T_29 | _drain_load_T_46; // @[Consts.scala:87:44, :90:{59,76}]
wire _drain_load_T_48 = ~_drain_load_T_47; // @[Consts.scala:90:76]
wire _drain_load_T_49 = _drain_load_T_24 & _drain_load_T_48; // @[Consts.scala:89:68]
wire _drain_load_T_50 = _rpq_io_deq_bits_uop_mem_cmd != 5'h6; // @[mshrs.scala:128:19, :269:51]
wire drain_load = _drain_load_T_49 & _drain_load_T_50; // @[mshrs.scala:267:59, :268:60, :269:51]
wire [5:0] _rp_addr_T = _rpq_io_deq_bits_addr[5:0]; // @[mshrs.scala:128:19, :271:61]
wire [33:0] rp_addr = {rp_addr_hi, _rp_addr_T}; // @[mshrs.scala:271:{22,61}]
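  // Load-data formatting for io_resp (AMOALU.scala LoadGen style): the refilled doubleword
  // data_word is narrowed stage by stage using address bits [2:0] to the requested word,
  // halfword and byte, each stage either sign-extending the selected slice (when
  // mem_signed is set and the access size matches) or passing the upper bits through, so
  // loads drained in state 5'h3 return a properly extended value.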
wire [1:0] size; // @[AMOALU.scala:11:18]
wire _rpq_io_deq_ready_T = io_resp_ready_0 & drain_load; // @[mshrs.scala:36:7, :268:60, :280:40]
wire _io_resp_valid_T = _rpq_io_deq_valid & drain_load; // @[mshrs.scala:128:19, :268:60, :284:43]
wire _GEN_41 = ~(|state) | _io_probe_rdy_T_2 | _io_probe_rdy_T_3; // @[package.scala:16:47]
assign io_resp_valid_0 = ~_GEN_41 & _io_probe_rdy_T_4 & _io_resp_valid_T; // @[package.scala:16:47]
wire _io_resp_bits_data_shifted_T = _rpq_io_deq_bits_addr[2]; // @[AMOALU.scala:42:29]
wire [31:0] _io_resp_bits_data_shifted_T_1 = data_word[63:32]; // @[AMOALU.scala:42:37]
wire [31:0] _io_resp_bits_data_T_5 = data_word[63:32]; // @[AMOALU.scala:42:37, :45:94]
wire [31:0] _io_resp_bits_data_shifted_T_2 = data_word[31:0]; // @[AMOALU.scala:42:55]
wire [31:0] io_resp_bits_data_shifted = _io_resp_bits_data_shifted_T ? _io_resp_bits_data_shifted_T_1 : _io_resp_bits_data_shifted_T_2; // @[AMOALU.scala:42:{24,29,37,55}]
wire [31:0] io_resp_bits_data_zeroed = io_resp_bits_data_shifted; // @[AMOALU.scala:42:24, :44:23]
wire _io_resp_bits_data_T = size == 2'h2; // @[AMOALU.scala:11:18, :45:26]
wire _io_resp_bits_data_T_1 = _io_resp_bits_data_T; // @[AMOALU.scala:45:{26,34}]
wire _io_resp_bits_data_T_2 = io_resp_bits_data_zeroed[31]; // @[AMOALU.scala:44:23, :45:81]
wire _io_resp_bits_data_T_3 = _rpq_io_deq_bits_uop_mem_signed & _io_resp_bits_data_T_2; // @[AMOALU.scala:45:{72,81}]
wire [31:0] _io_resp_bits_data_T_4 = {32{_io_resp_bits_data_T_3}}; // @[AMOALU.scala:45:{49,72}]
wire [31:0] _io_resp_bits_data_T_6 = _io_resp_bits_data_T_1 ? _io_resp_bits_data_T_4 : _io_resp_bits_data_T_5; // @[AMOALU.scala:45:{20,34,49,94}]
wire [63:0] _io_resp_bits_data_T_7 = {_io_resp_bits_data_T_6, io_resp_bits_data_zeroed}; // @[AMOALU.scala:44:23, :45:{16,20}]
wire _io_resp_bits_data_shifted_T_3 = _rpq_io_deq_bits_addr[1]; // @[AMOALU.scala:42:29]
wire [15:0] _io_resp_bits_data_shifted_T_4 = _io_resp_bits_data_T_7[31:16]; // @[AMOALU.scala:42:37, :45:16]
wire [15:0] _io_resp_bits_data_shifted_T_5 = _io_resp_bits_data_T_7[15:0]; // @[AMOALU.scala:42:55, :45:16]
wire [15:0] io_resp_bits_data_shifted_1 = _io_resp_bits_data_shifted_T_3 ? _io_resp_bits_data_shifted_T_4 : _io_resp_bits_data_shifted_T_5; // @[AMOALU.scala:42:{24,29,37,55}]
wire [15:0] io_resp_bits_data_zeroed_1 = io_resp_bits_data_shifted_1; // @[AMOALU.scala:42:24, :44:23]
wire _io_resp_bits_data_T_8 = size == 2'h1; // @[AMOALU.scala:11:18, :45:26]
wire _io_resp_bits_data_T_9 = _io_resp_bits_data_T_8; // @[AMOALU.scala:45:{26,34}]
wire _io_resp_bits_data_T_10 = io_resp_bits_data_zeroed_1[15]; // @[AMOALU.scala:44:23, :45:81]
wire _io_resp_bits_data_T_11 = _rpq_io_deq_bits_uop_mem_signed & _io_resp_bits_data_T_10; // @[AMOALU.scala:45:{72,81}]
wire [47:0] _io_resp_bits_data_T_12 = {48{_io_resp_bits_data_T_11}}; // @[AMOALU.scala:45:{49,72}]
wire [47:0] _io_resp_bits_data_T_13 = _io_resp_bits_data_T_7[63:16]; // @[AMOALU.scala:45:{16,94}]
wire [47:0] _io_resp_bits_data_T_14 = _io_resp_bits_data_T_9 ? _io_resp_bits_data_T_12 : _io_resp_bits_data_T_13; // @[AMOALU.scala:45:{20,34,49,94}]
wire [63:0] _io_resp_bits_data_T_15 = {_io_resp_bits_data_T_14, io_resp_bits_data_zeroed_1}; // @[AMOALU.scala:44:23, :45:{16,20}]
wire _io_resp_bits_data_shifted_T_6 = _rpq_io_deq_bits_addr[0]; // @[AMOALU.scala:42:29]
wire [7:0] _io_resp_bits_data_shifted_T_7 = _io_resp_bits_data_T_15[15:8]; // @[AMOALU.scala:42:37, :45:16]
wire [7:0] _io_resp_bits_data_shifted_T_8 = _io_resp_bits_data_T_15[7:0]; // @[AMOALU.scala:42:55, :45:16]
wire [7:0] io_resp_bits_data_shifted_2 = _io_resp_bits_data_shifted_T_6 ? _io_resp_bits_data_shifted_T_7 : _io_resp_bits_data_shifted_T_8; // @[AMOALU.scala:42:{24,29,37,55}]
wire [7:0] io_resp_bits_data_zeroed_2 = io_resp_bits_data_shifted_2; // @[AMOALU.scala:42:24, :44:23]
wire _io_resp_bits_data_T_16 = size == 2'h0; // @[AMOALU.scala:11:18, :45:26]
wire _io_resp_bits_data_T_17 = _io_resp_bits_data_T_16; // @[AMOALU.scala:45:{26,34}]
wire _io_resp_bits_data_T_18 = io_resp_bits_data_zeroed_2[7]; // @[AMOALU.scala:44:23, :45:81]
wire _io_resp_bits_data_T_19 = _rpq_io_deq_bits_uop_mem_signed & _io_resp_bits_data_T_18; // @[AMOALU.scala:45:{72,81}]
wire [55:0] _io_resp_bits_data_T_20 = {56{_io_resp_bits_data_T_19}}; // @[AMOALU.scala:45:{49,72}]
wire [55:0] _io_resp_bits_data_T_21 = _io_resp_bits_data_T_15[63:8]; // @[AMOALU.scala:45:{16,94}]
wire [55:0] _io_resp_bits_data_T_22 = _io_resp_bits_data_T_17 ? _io_resp_bits_data_T_20 : _io_resp_bits_data_T_21; // @[AMOALU.scala:45:{20,34,49,94}]
wire [63:0] _io_resp_bits_data_T_23 = {_io_resp_bits_data_T_22, io_resp_bits_data_zeroed_2}; // @[AMOALU.scala:44:23, :45:{16,20}]
assign io_resp_bits_data_0 = _GEN_41 | ~_io_probe_rdy_T_4 ? _rpq_io_deq_bits_data : _io_resp_bits_data_T_23; // @[package.scala:16:47]
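  // Drain bookkeeping: _T_26 is a replay-queue dequeue, _T_28 flags an empty queue on a
  // line that will not be committed, and _T_33 flags that only non-load entries remain;
  // io_commit_val fires in state 5'h3 once load draining is finished. io_meta_read is
  // issued in state 5'h4 unless the prober currently targets this set while a GrantAck is
  // still pending.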
wire _T_26 = rpq_io_deq_ready & _rpq_io_deq_valid; // @[Decoupled.scala:51:35]
wire _T_28 = _rpq_io_empty & ~commit_line; // @[mshrs.scala:128:19, :140:24, :290:{31,34}]
wire _T_33 = _rpq_io_empty | _rpq_io_deq_valid & ~drain_load; // @[mshrs.scala:128:19, :268:60, :296:{31,52,55}]
assign io_commit_val_0 = ~_GEN_41 & _io_probe_rdy_T_4 & ~(_T_26 | _T_28) & _T_33; // @[Decoupled.scala:51:35]
wire _io_meta_read_valid_T = ~io_prober_state_valid_0; // @[mshrs.scala:36:7, :303:27]
wire _io_meta_read_valid_T_1 = ~grantack_valid; // @[mshrs.scala:138:21, :303:53]
wire _io_meta_read_valid_T_2 = _io_meta_read_valid_T | _io_meta_read_valid_T_1; // @[mshrs.scala:303:{27,50,53}]
wire [3:0] _io_meta_read_valid_T_3 = io_prober_state_bits_0[9:6]; // @[mshrs.scala:36:7, :303:93]
wire _io_meta_read_valid_T_4 = _io_meta_read_valid_T_3 != req_idx; // @[mshrs.scala:110:25, :303:{93,120}]
wire _io_meta_read_valid_T_5 = _io_meta_read_valid_T_2 | _io_meta_read_valid_T_4; // @[mshrs.scala:303:{50,69,120}]
assign io_meta_read_valid_0 = ~(~(|state) | _io_probe_rdy_T_2 | _io_probe_rdy_T_3 | _io_probe_rdy_T_4) & _io_probe_rdy_T_8 & _io_meta_read_valid_T_5; // @[package.scala:16:47]
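  // Metadata response and the writeback/refill/replay sequence: the decode below repeats
  // the shrink-to-Nothing lookup on io_meta_resp_bits_coh_state to decide whether the
  // victim needs a writeback (needs_wb); if no metadata response arrived the FSM retries
  // the meta read (5'h4), otherwise it clears the metadata and writes the line back
  // (5'h7, io_wb_req in 5'h9) or proceeds directly to 5'hB, where io_refill streams the
  // line buffer into the data array beat by beat (refill_ctr); state 5'hC then replays the
  // queued requests through io_replay, reusing the rpq head address and way.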
wire _T_36 = state == 5'h5; // @[mshrs.scala:107:22, :307:22]
wire _T_37 = state == 5'h6; // @[mshrs.scala:107:22, :309:22]
wire [3:0] _needs_wb_r_T_6 = {2'h2, io_meta_resp_bits_coh_state_0}; // @[Metadata.scala:120:19]
wire _needs_wb_r_T_19 = _needs_wb_r_T_6 == 4'h8; // @[Misc.scala:56:20]
wire [2:0] _needs_wb_r_T_21 = _needs_wb_r_T_19 ? 3'h5 : 3'h0; // @[Misc.scala:38:36, :56:20]
wire _needs_wb_r_T_23 = _needs_wb_r_T_6 == 4'h9; // @[Misc.scala:56:20]
wire [2:0] _needs_wb_r_T_25 = _needs_wb_r_T_23 ? 3'h2 : _needs_wb_r_T_21; // @[Misc.scala:38:36, :56:20]
wire _needs_wb_r_T_27 = _needs_wb_r_T_6 == 4'hA; // @[Misc.scala:56:20]
wire [2:0] _needs_wb_r_T_29 = _needs_wb_r_T_27 ? 3'h1 : _needs_wb_r_T_25; // @[Misc.scala:38:36, :56:20]
wire _needs_wb_r_T_31 = _needs_wb_r_T_6 == 4'hB; // @[Misc.scala:56:20]
wire _needs_wb_r_T_32 = _needs_wb_r_T_31; // @[Misc.scala:38:9, :56:20]
wire [2:0] _needs_wb_r_T_33 = _needs_wb_r_T_31 ? 3'h1 : _needs_wb_r_T_29; // @[Misc.scala:38:36, :56:20]
wire _needs_wb_r_T_35 = _needs_wb_r_T_6 == 4'h4; // @[Misc.scala:56:20]
wire _needs_wb_r_T_36 = ~_needs_wb_r_T_35 & _needs_wb_r_T_32; // @[Misc.scala:38:9, :56:20]
wire [2:0] _needs_wb_r_T_37 = _needs_wb_r_T_35 ? 3'h5 : _needs_wb_r_T_33; // @[Misc.scala:38:36, :56:20]
wire _needs_wb_r_T_39 = _needs_wb_r_T_6 == 4'h5; // @[Misc.scala:56:20]
wire _needs_wb_r_T_40 = ~_needs_wb_r_T_39 & _needs_wb_r_T_36; // @[Misc.scala:38:9, :56:20]
wire [2:0] _needs_wb_r_T_41 = _needs_wb_r_T_39 ? 3'h4 : _needs_wb_r_T_37; // @[Misc.scala:38:36, :56:20]
wire [1:0] _needs_wb_r_T_42 = {1'h0, _needs_wb_r_T_39}; // @[Misc.scala:38:63, :56:20]
wire _needs_wb_r_T_43 = _needs_wb_r_T_6 == 4'h6; // @[Misc.scala:56:20]
wire _needs_wb_r_T_44 = ~_needs_wb_r_T_43 & _needs_wb_r_T_40; // @[Misc.scala:38:9, :56:20]
wire [2:0] _needs_wb_r_T_45 = _needs_wb_r_T_43 ? 3'h0 : _needs_wb_r_T_41; // @[Misc.scala:38:36, :56:20]
wire [1:0] _needs_wb_r_T_46 = _needs_wb_r_T_43 ? 2'h1 : _needs_wb_r_T_42; // @[Misc.scala:38:63, :56:20]
wire _needs_wb_r_T_47 = _needs_wb_r_T_6 == 4'h7; // @[Misc.scala:56:20]
wire _needs_wb_r_T_48 = _needs_wb_r_T_47 | _needs_wb_r_T_44; // @[Misc.scala:38:9, :56:20]
wire [2:0] _needs_wb_r_T_49 = _needs_wb_r_T_47 ? 3'h0 : _needs_wb_r_T_45; // @[Misc.scala:38:36, :56:20]
wire [1:0] _needs_wb_r_T_50 = _needs_wb_r_T_47 ? 2'h1 : _needs_wb_r_T_46; // @[Misc.scala:38:63, :56:20]
wire _needs_wb_r_T_51 = _needs_wb_r_T_6 == 4'h0; // @[Misc.scala:56:20]
wire _needs_wb_r_T_52 = ~_needs_wb_r_T_51 & _needs_wb_r_T_48; // @[Misc.scala:38:9, :56:20]
wire [2:0] _needs_wb_r_T_53 = _needs_wb_r_T_51 ? 3'h5 : _needs_wb_r_T_49; // @[Misc.scala:38:36, :56:20]
wire [1:0] _needs_wb_r_T_54 = _needs_wb_r_T_51 ? 2'h0 : _needs_wb_r_T_50; // @[Misc.scala:38:63, :56:20]
wire _needs_wb_r_T_55 = _needs_wb_r_T_6 == 4'h1; // @[Misc.scala:56:20]
wire _needs_wb_r_T_56 = ~_needs_wb_r_T_55 & _needs_wb_r_T_52; // @[Misc.scala:38:9, :56:20]
wire [2:0] _needs_wb_r_T_57 = _needs_wb_r_T_55 ? 3'h4 : _needs_wb_r_T_53; // @[Misc.scala:38:36, :56:20]
wire [1:0] _needs_wb_r_T_58 = _needs_wb_r_T_55 ? 2'h1 : _needs_wb_r_T_54; // @[Misc.scala:38:63, :56:20]
wire _needs_wb_r_T_59 = _needs_wb_r_T_6 == 4'h2; // @[Misc.scala:56:20]
wire _needs_wb_r_T_60 = ~_needs_wb_r_T_59 & _needs_wb_r_T_56; // @[Misc.scala:38:9, :56:20]
wire [2:0] _needs_wb_r_T_61 = _needs_wb_r_T_59 ? 3'h3 : _needs_wb_r_T_57; // @[Misc.scala:38:36, :56:20]
wire [1:0] _needs_wb_r_T_62 = _needs_wb_r_T_59 ? 2'h2 : _needs_wb_r_T_58; // @[Misc.scala:38:63, :56:20]
wire _needs_wb_r_T_63 = _needs_wb_r_T_6 == 4'h3; // @[Misc.scala:56:20]
wire needs_wb = _needs_wb_r_T_63 | _needs_wb_r_T_60; // @[Misc.scala:38:9, :56:20]
wire [2:0] needs_wb_r_2 = _needs_wb_r_T_63 ? 3'h3 : _needs_wb_r_T_61; // @[Misc.scala:38:36, :56:20]
wire [1:0] needs_wb_r_3 = _needs_wb_r_T_63 ? 2'h2 : _needs_wb_r_T_62; // @[Misc.scala:38:63, :56:20]
wire [1:0] needs_wb_meta_state = needs_wb_r_3; // @[Misc.scala:38:63]
wire _state_T_30 = ~io_meta_resp_valid_0; // @[mshrs.scala:36:7, :311:18]
wire [4:0] _state_T_31 = needs_wb ? 5'h7 : 5'hB; // @[Misc.scala:38:9]
wire [4:0] _state_T_32 = _state_T_30 ? 5'h4 : _state_T_31; // @[mshrs.scala:311:{17,18}, :312:17]
wire _T_38 = state == 5'h7; // @[mshrs.scala:107:22, :313:22]
wire _T_40 = state == 5'h9; // @[mshrs.scala:107:22, :319:22]
assign io_wb_req_valid_0 = ~(~(|state) | _io_probe_rdy_T_2 | _io_probe_rdy_T_3 | _io_probe_rdy_T_4 | _io_probe_rdy_T_8 | _T_36 | _T_37 | _T_38) & _T_40; // @[package.scala:16:47]
wire _T_42 = state == 5'hA; // @[mshrs.scala:107:22, :324:22]
wire _T_43 = state == 5'hB; // @[mshrs.scala:107:22, :328:22]
wire _GEN_42 = _io_probe_rdy_T_8 | _T_36 | _T_37 | _T_38 | _T_40 | _T_42; // @[mshrs.scala:148:129, :200:21, :302:39, :307:{22,41}, :309:{22,41}, :313:{22,40}, :319:{22,36}, :324:{22,37}, :328:41]
assign io_lb_read_offset_0 = _GEN_41 ? _io_lb_read_offset_T[2:0] : _io_probe_rdy_T_4 ? _io_lb_read_offset_T_1[2:0] : _GEN_42 | ~_T_43 ? _io_lb_read_offset_T[2:0] : refill_ctr; // @[package.scala:16:47]
wire _GEN_43 = _io_probe_rdy_T_2 | _io_probe_rdy_T_3 | _io_probe_rdy_T_4 | _GEN_42; // @[package.scala:16:47]
assign io_refill_valid_0 = ~(~(|state) | _GEN_43) & _T_43; // @[package.scala:16:47]
wire [3:0] _refill_ctr_T = {1'h0, refill_ctr} + 4'h1; // @[mshrs.scala:139:24, :333:32]
wire [2:0] _refill_ctr_T_1 = _refill_ctr_T[2:0]; // @[mshrs.scala:333:32]
wire _T_46 = state == 5'hC; // @[mshrs.scala:107:22, :338:22]
wire _GEN_44 = _io_probe_rdy_T_8 | _T_36 | _T_37 | _T_38 | _T_40 | _T_42 | _T_43; // @[mshrs.scala:148:129, :176:26, :302:39, :307:{22,41}, :309:{22,41}, :313:{22,40}, :319:{22,36}, :324:{22,37}, :328:{22,41}, :338:39]
wire _GEN_45 = _io_probe_rdy_T_4 | _GEN_44; // @[package.scala:16:47]
wire _GEN_46 = ~(|state) | _io_probe_rdy_T_2 | _io_probe_rdy_T_3 | _GEN_45; // @[package.scala:16:47]
assign io_replay_valid_0 = ~_GEN_46 & _T_46 & _rpq_io_deq_valid; // @[mshrs.scala:36:7, :128:19, :176:26, :234:30, :241:40, :246:41, :266:45, :302:39, :307:41, :309:41, :313:40, :319:36, :324:37, :328:41, :338:{22,39}, :339:15]
assign rpq_io_deq_ready = ~_GEN_41 & (_io_probe_rdy_T_4 ? _rpq_io_deq_ready_T : ~_GEN_44 & _T_46 & io_replay_ready_0); // @[package.scala:16:47]
wire _GEN_47 = _GEN_46 | ~_T_46; // @[mshrs.scala:176:26, :177:26, :234:30, :241:40, :246:41, :266:45, :302:39, :307:41, :309:41, :313:40, :319:36, :324:37, :328:41, :338:{22,39}]
assign io_replay_bits_way_en_0 = _GEN_47 ? _rpq_io_deq_bits_way_en : req_way_en; // @[mshrs.scala:36:7, :109:20, :128:19, :177:26, :234:30, :241:40, :246:41, :266:45, :302:39, :307:41, :309:41, :313:40, :319:36, :324:37, :328:41, :338:39]
wire [5:0] _io_replay_bits_addr_T = _rpq_io_deq_bits_addr[5:0]; // @[mshrs.scala:128:19, :341:70]
wire [33:0] _io_replay_bits_addr_T_1 = {io_replay_bits_addr_hi, _io_replay_bits_addr_T}; // @[mshrs.scala:341:{31,70}]
assign io_replay_bits_addr_0 = _GEN_47 ? _rpq_io_deq_bits_addr : _io_replay_bits_addr_T_1; // @[mshrs.scala:36:7, :128:19, :177:26, :234:30, :241:40, :246:41, :266:45, :302:39, :307:41, :309:41, :313:40, :319:36, :324:37, :328:41, :338:39, :341:31]
wire _T_48 = _rpq_io_deq_bits_uop_mem_cmd == 5'h1; // @[Consts.scala:90:32]
wire _r_c_cat_T; // @[Consts.scala:90:32]
assign _r_c_cat_T = _T_48; // @[Consts.scala:90:32]
wire _r_c_cat_T_23; // @[Consts.scala:90:32]
assign _r_c_cat_T_23 = _T_48; // @[Consts.scala:90:32]
wire _T_49 = _rpq_io_deq_bits_uop_mem_cmd == 5'h11; // @[Consts.scala:90:49]
wire _r_c_cat_T_1; // @[Consts.scala:90:49]
assign _r_c_cat_T_1 = _T_49; // @[Consts.scala:90:49]
wire _r_c_cat_T_24; // @[Consts.scala:90:49]
assign _r_c_cat_T_24 = _T_49; // @[Consts.scala:90:49]
wire _T_51 = _rpq_io_deq_bits_uop_mem_cmd == 5'h7; // @[Consts.scala:90:66]
wire _r_c_cat_T_3; // @[Consts.scala:90:66]
assign _r_c_cat_T_3 = _T_51; // @[Consts.scala:90:66]
wire _r_c_cat_T_26; // @[Consts.scala:90:66]
assign _r_c_cat_T_26 = _T_51; // @[Consts.scala:90:66]
wire _T_53 = _rpq_io_deq_bits_uop_mem_cmd == 5'h4; // @[package.scala:16:47]
wire _r_c_cat_T_5; // @[package.scala:16:47]
assign _r_c_cat_T_5 = _T_53; // @[package.scala:16:47]
wire _r_c_cat_T_28; // @[package.scala:16:47]
assign _r_c_cat_T_28 = _T_53; // @[package.scala:16:47]
wire _T_54 = _rpq_io_deq_bits_uop_mem_cmd == 5'h9; // @[package.scala:16:47]
wire _r_c_cat_T_6; // @[package.scala:16:47]
assign _r_c_cat_T_6 = _T_54; // @[package.scala:16:47]
wire _r_c_cat_T_29; // @[package.scala:16:47]
assign _r_c_cat_T_29 = _T_54; // @[package.scala:16:47]
wire _T_55 = _rpq_io_deq_bits_uop_mem_cmd == 5'hA; // @[package.scala:16:47]
wire _r_c_cat_T_7; // @[package.scala:16:47]
assign _r_c_cat_T_7 = _T_55; // @[package.scala:16:47]
wire _r_c_cat_T_30; // @[package.scala:16:47]
assign _r_c_cat_T_30 = _T_55; // @[package.scala:16:47]
wire _T_56 = _rpq_io_deq_bits_uop_mem_cmd == 5'hB; // @[package.scala:16:47]
wire _r_c_cat_T_8; // @[package.scala:16:47]
assign _r_c_cat_T_8 = _T_56; // @[package.scala:16:47]
wire _r_c_cat_T_31; // @[package.scala:16:47]
assign _r_c_cat_T_31 = _T_56; // @[package.scala:16:47]
wire _T_60 = _rpq_io_deq_bits_uop_mem_cmd == 5'h8; // @[package.scala:16:47]
wire _r_c_cat_T_12; // @[package.scala:16:47]
assign _r_c_cat_T_12 = _T_60; // @[package.scala:16:47]
wire _r_c_cat_T_35; // @[package.scala:16:47]
assign _r_c_cat_T_35 = _T_60; // @[package.scala:16:47]
wire _T_61 = _rpq_io_deq_bits_uop_mem_cmd == 5'hC; // @[package.scala:16:47]
wire _r_c_cat_T_13; // @[package.scala:16:47]
assign _r_c_cat_T_13 = _T_61; // @[package.scala:16:47]
wire _r_c_cat_T_36; // @[package.scala:16:47]
assign _r_c_cat_T_36 = _T_61; // @[package.scala:16:47]
wire _T_62 = _rpq_io_deq_bits_uop_mem_cmd == 5'hD; // @[package.scala:16:47]
wire _r_c_cat_T_14; // @[package.scala:16:47]
assign _r_c_cat_T_14 = _T_62; // @[package.scala:16:47]
wire _r_c_cat_T_37; // @[package.scala:16:47]
assign _r_c_cat_T_37 = _T_62; // @[package.scala:16:47]
wire _T_63 = _rpq_io_deq_bits_uop_mem_cmd == 5'hE; // @[package.scala:16:47]
wire _r_c_cat_T_15; // @[package.scala:16:47]
assign _r_c_cat_T_15 = _T_63; // @[package.scala:16:47]
wire _r_c_cat_T_38; // @[package.scala:16:47]
assign _r_c_cat_T_38 = _T_63; // @[package.scala:16:47]
wire _T_64 = _rpq_io_deq_bits_uop_mem_cmd == 5'hF; // @[package.scala:16:47]
wire _r_c_cat_T_16; // @[package.scala:16:47]
assign _r_c_cat_T_16 = _T_64; // @[package.scala:16:47]
wire _r_c_cat_T_39; // @[package.scala:16:47]
assign _r_c_cat_T_39 = _T_64; // @[package.scala:16:47]
wire _T_71 = io_replay_ready_0 & io_replay_valid_0 & (_T_48 | _T_49 | _T_51 | _T_53 | _T_54 | _T_55 | _T_56 | _T_60 | _T_61 | _T_62 | _T_63 | _T_64); // @[Decoupled.scala:51:35]
wire _r_c_cat_T_2 = _r_c_cat_T | _r_c_cat_T_1; // @[Consts.scala:90:{32,42,49}]
wire _r_c_cat_T_4 = _r_c_cat_T_2 | _r_c_cat_T_3; // @[Consts.scala:90:{42,59,66}]
wire _r_c_cat_T_9 = _r_c_cat_T_5 | _r_c_cat_T_6; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_10 = _r_c_cat_T_9 | _r_c_cat_T_7; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_11 = _r_c_cat_T_10 | _r_c_cat_T_8; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_17 = _r_c_cat_T_12 | _r_c_cat_T_13; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_18 = _r_c_cat_T_17 | _r_c_cat_T_14; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_19 = _r_c_cat_T_18 | _r_c_cat_T_15; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_20 = _r_c_cat_T_19 | _r_c_cat_T_16; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_21 = _r_c_cat_T_11 | _r_c_cat_T_20; // @[package.scala:81:59]
wire _r_c_cat_T_22 = _r_c_cat_T_4 | _r_c_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}]
wire _r_c_cat_T_25 = _r_c_cat_T_23 | _r_c_cat_T_24; // @[Consts.scala:90:{32,42,49}]
wire _r_c_cat_T_27 = _r_c_cat_T_25 | _r_c_cat_T_26; // @[Consts.scala:90:{42,59,66}]
wire _r_c_cat_T_32 = _r_c_cat_T_28 | _r_c_cat_T_29; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_33 = _r_c_cat_T_32 | _r_c_cat_T_30; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_34 = _r_c_cat_T_33 | _r_c_cat_T_31; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_40 = _r_c_cat_T_35 | _r_c_cat_T_36; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_41 = _r_c_cat_T_40 | _r_c_cat_T_37; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_42 = _r_c_cat_T_41 | _r_c_cat_T_38; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_43 = _r_c_cat_T_42 | _r_c_cat_T_39; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_44 = _r_c_cat_T_34 | _r_c_cat_T_43; // @[package.scala:81:59]
wire _r_c_cat_T_45 = _r_c_cat_T_27 | _r_c_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}]
wire _r_c_cat_T_46 = _rpq_io_deq_bits_uop_mem_cmd == 5'h3; // @[Consts.scala:91:54]
wire _r_c_cat_T_47 = _r_c_cat_T_45 | _r_c_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}]
wire _r_c_cat_T_49 = _r_c_cat_T_47 | _r_c_cat_T_48; // @[Consts.scala:91:{47,64,71}]
wire [1:0] r_c = {_r_c_cat_T_22, _r_c_cat_T_49}; // @[Metadata.scala:29:18]
wire [3:0] _r_T_64 = {r_c, new_coh_state}; // @[Metadata.scala:29:18, :58:19]
wire _r_T_89 = _r_T_64 == 4'hC; // @[Misc.scala:49:20]
wire [1:0] _r_T_91 = {1'h0, _r_T_89}; // @[Misc.scala:35:36, :49:20]
wire _r_T_92 = _r_T_64 == 4'hD; // @[Misc.scala:49:20]
wire [1:0] _r_T_94 = _r_T_92 ? 2'h2 : _r_T_91; // @[Misc.scala:35:36, :49:20]
wire _r_T_95 = _r_T_64 == 4'h4; // @[Misc.scala:49:20]
wire [1:0] _r_T_97 = _r_T_95 ? 2'h1 : _r_T_94; // @[Misc.scala:35:36, :49:20]
wire _r_T_98 = _r_T_64 == 4'h5; // @[Misc.scala:49:20]
wire [1:0] _r_T_100 = _r_T_98 ? 2'h2 : _r_T_97; // @[Misc.scala:35:36, :49:20]
wire _r_T_101 = _r_T_64 == 4'h0; // @[Misc.scala:49:20]
wire [1:0] _r_T_103 = _r_T_101 ? 2'h0 : _r_T_100; // @[Misc.scala:35:36, :49:20]
wire _r_T_104 = _r_T_64 == 4'hE; // @[Misc.scala:49:20]
wire _r_T_105 = _r_T_104; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r_T_106 = _r_T_104 ? 2'h3 : _r_T_103; // @[Misc.scala:35:36, :49:20]
wire _r_T_107 = &_r_T_64; // @[Misc.scala:49:20]
wire _r_T_108 = _r_T_107 | _r_T_105; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r_T_109 = _r_T_107 ? 2'h3 : _r_T_106; // @[Misc.scala:35:36, :49:20]
wire _r_T_110 = _r_T_64 == 4'h6; // @[Misc.scala:49:20]
wire _r_T_111 = _r_T_110 | _r_T_108; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r_T_112 = _r_T_110 ? 2'h2 : _r_T_109; // @[Misc.scala:35:36, :49:20]
wire _r_T_113 = _r_T_64 == 4'h7; // @[Misc.scala:49:20]
wire _r_T_114 = _r_T_113 | _r_T_111; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r_T_115 = _r_T_113 ? 2'h3 : _r_T_112; // @[Misc.scala:35:36, :49:20]
wire _r_T_116 = _r_T_64 == 4'h1; // @[Misc.scala:49:20]
wire _r_T_117 = _r_T_116 | _r_T_114; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r_T_118 = _r_T_116 ? 2'h1 : _r_T_115; // @[Misc.scala:35:36, :49:20]
wire _r_T_119 = _r_T_64 == 4'h2; // @[Misc.scala:49:20]
wire _r_T_120 = _r_T_119 | _r_T_117; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r_T_121 = _r_T_119 ? 2'h2 : _r_T_118; // @[Misc.scala:35:36, :49:20]
wire _r_T_122 = _r_T_64 == 4'h3; // @[Misc.scala:49:20]
wire is_hit = _r_T_122 | _r_T_120; // @[Misc.scala:35:9, :49:20]
wire [1:0] r_2_1 = _r_T_122 ? 2'h3 : _r_T_121; // @[Misc.scala:35:36, :49:20]
wire [1:0] coh_on_hit_state = r_2_1; // @[Misc.scala:35:36]
wire _GEN_48 = _T_40 | _T_42 | _T_43 | _T_46; // @[mshrs.scala:156:26, :319:{22,36}, :324:{22,37}, :328:{22,41}, :338:{22,39}, :351:44]
assign io_meta_write_valid_0 = ~(~(|state) | _io_probe_rdy_T_2 | _io_probe_rdy_T_3 | _io_probe_rdy_T_4 | _io_probe_rdy_T_8 | _T_36 | _T_37) & (_T_38 | ~_GEN_48 & _sec_rdy_T_4); // @[package.scala:16:47]
wire _GEN_49 = _io_probe_rdy_T_8 | _T_36 | _T_37 | _T_38 | _GEN_48; // @[mshrs.scala:148:129, :156:26, :158:31, :302:39, :307:{22,41}, :309:{22,41}, :313:{22,40}, :319:36, :324:37, :328:41, :338:39, :351:44]
assign io_meta_write_bits_data_coh_state_0 = ~(|state) | _io_probe_rdy_T_2 | _io_probe_rdy_T_3 | _io_probe_rdy_T_4 | _GEN_49 | ~_sec_rdy_T_4 ? coh_on_clear_state : new_coh_state; // @[package.scala:16:47]
wire _GEN_50 = _io_probe_rdy_T_4 | _io_probe_rdy_T_8 | _T_36 | _T_37 | _T_38 | _T_40 | _T_42 | _T_43 | _T_46 | _sec_rdy_T_4; // @[package.scala:16:47]
assign io_mem_finish_valid_0 = ~(~(|state) | _io_probe_rdy_T_2 | _io_probe_rdy_T_3 | _GEN_50) & _sec_rdy_T_5 & grantack_valid; // @[package.scala:16:47]
wire [4:0] _state_T_33 = finish_to_prefetch ? 5'h11 : 5'h0; // @[mshrs.scala:142:31, :368:17]
wire _GEN_51 = _sec_rdy_T_4 | _sec_rdy_T_5 | _sec_rdy_T_6; // @[package.scala:16:47]
wire _GEN_52 = _T_46 | _GEN_51; // @[mshrs.scala:162:26, :338:{22,39}, :351:44, :361:42, :367:42, :369:38]
wire _GEN_53 = _io_probe_rdy_T_4 | _io_probe_rdy_T_8 | _T_36 | _T_37 | _T_38 | _T_40 | _T_42 | _T_43 | _GEN_52; // @[package.scala:16:47]
wire _GEN_54 = _io_probe_rdy_T_2 | _io_probe_rdy_T_3 | _GEN_53; // @[package.scala:16:47]
assign io_req_pri_rdy_0 = ~(|state) | ~_GEN_54 & _io_way_valid_T_1; // @[package.scala:16:47]
wire _T_87 = io_req_sec_val_0 & ~io_req_sec_rdy_0 | io_clear_prefetch_0; // @[mshrs.scala:36:7, :371:{27,30,47}]
wire _r_c_cat_T_52 = _r_c_cat_T_50 | _r_c_cat_T_51; // @[Consts.scala:90:{32,42,49}]
wire _r_c_cat_T_54 = _r_c_cat_T_52 | _r_c_cat_T_53; // @[Consts.scala:90:{42,59,66}]
wire _r_c_cat_T_59 = _r_c_cat_T_55 | _r_c_cat_T_56; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_60 = _r_c_cat_T_59 | _r_c_cat_T_57; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_61 = _r_c_cat_T_60 | _r_c_cat_T_58; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_67 = _r_c_cat_T_62 | _r_c_cat_T_63; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_68 = _r_c_cat_T_67 | _r_c_cat_T_64; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_69 = _r_c_cat_T_68 | _r_c_cat_T_65; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_70 = _r_c_cat_T_69 | _r_c_cat_T_66; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_71 = _r_c_cat_T_61 | _r_c_cat_T_70; // @[package.scala:81:59]
wire _r_c_cat_T_72 = _r_c_cat_T_54 | _r_c_cat_T_71; // @[Consts.scala:87:44, :90:{59,76}]
wire _r_c_cat_T_75 = _r_c_cat_T_73 | _r_c_cat_T_74; // @[Consts.scala:90:{32,42,49}]
wire _r_c_cat_T_77 = _r_c_cat_T_75 | _r_c_cat_T_76; // @[Consts.scala:90:{42,59,66}]
wire _r_c_cat_T_82 = _r_c_cat_T_78 | _r_c_cat_T_79; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_83 = _r_c_cat_T_82 | _r_c_cat_T_80; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_84 = _r_c_cat_T_83 | _r_c_cat_T_81; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_90 = _r_c_cat_T_85 | _r_c_cat_T_86; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_91 = _r_c_cat_T_90 | _r_c_cat_T_87; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_92 = _r_c_cat_T_91 | _r_c_cat_T_88; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_93 = _r_c_cat_T_92 | _r_c_cat_T_89; // @[package.scala:16:47, :81:59]
wire _r_c_cat_T_94 = _r_c_cat_T_84 | _r_c_cat_T_93; // @[package.scala:81:59]
wire _r_c_cat_T_95 = _r_c_cat_T_77 | _r_c_cat_T_94; // @[Consts.scala:87:44, :90:{59,76}]
wire _r_c_cat_T_97 = _r_c_cat_T_95 | _r_c_cat_T_96; // @[Consts.scala:90:76, :91:{47,54}]
wire _r_c_cat_T_99 = _r_c_cat_T_97 | _r_c_cat_T_98; // @[Consts.scala:91:{47,64,71}]
wire [1:0] r_c_1 = {_r_c_cat_T_72, _r_c_cat_T_99}; // @[Metadata.scala:29:18]
wire [3:0] _r_T_123 = {r_c_1, new_coh_state}; // @[Metadata.scala:29:18, :58:19]
wire _r_T_148 = _r_T_123 == 4'hC; // @[Misc.scala:49:20]
wire [1:0] _r_T_150 = {1'h0, _r_T_148}; // @[Misc.scala:35:36, :49:20]
wire _r_T_151 = _r_T_123 == 4'hD; // @[Misc.scala:49:20]
wire [1:0] _r_T_153 = _r_T_151 ? 2'h2 : _r_T_150; // @[Misc.scala:35:36, :49:20]
wire _r_T_154 = _r_T_123 == 4'h4; // @[Misc.scala:49:20]
wire [1:0] _r_T_156 = _r_T_154 ? 2'h1 : _r_T_153; // @[Misc.scala:35:36, :49:20]
wire _r_T_157 = _r_T_123 == 4'h5; // @[Misc.scala:49:20]
wire [1:0] _r_T_159 = _r_T_157 ? 2'h2 : _r_T_156; // @[Misc.scala:35:36, :49:20]
wire _r_T_160 = _r_T_123 == 4'h0; // @[Misc.scala:49:20]
wire [1:0] _r_T_162 = _r_T_160 ? 2'h0 : _r_T_159; // @[Misc.scala:35:36, :49:20]
wire _r_T_163 = _r_T_123 == 4'hE; // @[Misc.scala:49:20]
wire _r_T_164 = _r_T_163; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r_T_165 = _r_T_163 ? 2'h3 : _r_T_162; // @[Misc.scala:35:36, :49:20]
wire _r_T_166 = &_r_T_123; // @[Misc.scala:49:20]
wire _r_T_167 = _r_T_166 | _r_T_164; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r_T_168 = _r_T_166 ? 2'h3 : _r_T_165; // @[Misc.scala:35:36, :49:20]
wire _r_T_169 = _r_T_123 == 4'h6; // @[Misc.scala:49:20]
wire _r_T_170 = _r_T_169 | _r_T_167; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r_T_171 = _r_T_169 ? 2'h2 : _r_T_168; // @[Misc.scala:35:36, :49:20]
wire _r_T_172 = _r_T_123 == 4'h7; // @[Misc.scala:49:20]
wire _r_T_173 = _r_T_172 | _r_T_170; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r_T_174 = _r_T_172 ? 2'h3 : _r_T_171; // @[Misc.scala:35:36, :49:20]
wire _r_T_175 = _r_T_123 == 4'h1; // @[Misc.scala:49:20]
wire _r_T_176 = _r_T_175 | _r_T_173; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r_T_177 = _r_T_175 ? 2'h1 : _r_T_174; // @[Misc.scala:35:36, :49:20]
wire _r_T_178 = _r_T_123 == 4'h2; // @[Misc.scala:49:20]
wire _r_T_179 = _r_T_178 | _r_T_176; // @[Misc.scala:35:9, :49:20]
wire [1:0] _r_T_180 = _r_T_178 ? 2'h2 : _r_T_177; // @[Misc.scala:35:36, :49:20]
wire _r_T_181 = _r_T_123 == 4'h3; // @[Misc.scala:49:20]
wire is_hit_1 = _r_T_181 | _r_T_179; // @[Misc.scala:35:9, :49:20]
wire [1:0] r_2_2 = _r_T_181 ? 2'h3 : _r_T_180; // @[Misc.scala:35:36, :49:20]
wire [1:0] coh_on_hit_1_state = r_2_2; // @[Misc.scala:35:36]
wire [4:0] state_new_state_1; // @[mshrs.scala:210:29]
wire _state_T_35 = ~_state_T_34; // @[mshrs.scala:213:11]
wire _state_T_36 = ~_rpq_io_enq_ready; // @[mshrs.scala:128:19, :213:11]
wire _state_req_needs_wb_r_T_83 = _state_req_needs_wb_r_T_70 == 4'h8; // @[Misc.scala:56:20]
wire [2:0] _state_req_needs_wb_r_T_85 = _state_req_needs_wb_r_T_83 ? 3'h5 : 3'h0; // @[Misc.scala:38:36, :56:20]
wire _state_req_needs_wb_r_T_87 = _state_req_needs_wb_r_T_70 == 4'h9; // @[Misc.scala:56:20]
wire [2:0] _state_req_needs_wb_r_T_89 = _state_req_needs_wb_r_T_87 ? 3'h2 : _state_req_needs_wb_r_T_85; // @[Misc.scala:38:36, :56:20]
wire _state_req_needs_wb_r_T_91 = _state_req_needs_wb_r_T_70 == 4'hA; // @[Misc.scala:56:20]
wire [2:0] _state_req_needs_wb_r_T_93 = _state_req_needs_wb_r_T_91 ? 3'h1 : _state_req_needs_wb_r_T_89; // @[Misc.scala:38:36, :56:20]
wire _state_req_needs_wb_r_T_95 = _state_req_needs_wb_r_T_70 == 4'hB; // @[Misc.scala:56:20]
wire _state_req_needs_wb_r_T_96 = _state_req_needs_wb_r_T_95; // @[Misc.scala:38:9, :56:20]
wire [2:0] _state_req_needs_wb_r_T_97 = _state_req_needs_wb_r_T_95 ? 3'h1 : _state_req_needs_wb_r_T_93; // @[Misc.scala:38:36, :56:20]
wire _state_req_needs_wb_r_T_99 = _state_req_needs_wb_r_T_70 == 4'h4; // @[Misc.scala:56:20]
wire _state_req_needs_wb_r_T_100 = ~_state_req_needs_wb_r_T_99 & _state_req_needs_wb_r_T_96; // @[Misc.scala:38:9, :56:20]
wire [2:0] _state_req_needs_wb_r_T_101 = _state_req_needs_wb_r_T_99 ? 3'h5 : _state_req_needs_wb_r_T_97; // @[Misc.scala:38:36, :56:20]
wire _state_req_needs_wb_r_T_103 = _state_req_needs_wb_r_T_70 == 4'h5; // @[Misc.scala:56:20]
wire _state_req_needs_wb_r_T_104 = ~_state_req_needs_wb_r_T_103 & _state_req_needs_wb_r_T_100; // @[Misc.scala:38:9, :56:20]
wire [2:0] _state_req_needs_wb_r_T_105 = _state_req_needs_wb_r_T_103 ? 3'h4 : _state_req_needs_wb_r_T_101; // @[Misc.scala:38:36, :56:20]
wire [1:0] _state_req_needs_wb_r_T_106 = {1'h0, _state_req_needs_wb_r_T_103}; // @[Misc.scala:38:63, :56:20]
wire _state_req_needs_wb_r_T_107 = _state_req_needs_wb_r_T_70 == 4'h6; // @[Misc.scala:56:20]
wire _state_req_needs_wb_r_T_108 = ~_state_req_needs_wb_r_T_107 & _state_req_needs_wb_r_T_104; // @[Misc.scala:38:9, :56:20]
wire [2:0] _state_req_needs_wb_r_T_109 = _state_req_needs_wb_r_T_107 ? 3'h0 : _state_req_needs_wb_r_T_105; // @[Misc.scala:38:36, :56:20]
wire [1:0] _state_req_needs_wb_r_T_110 = _state_req_needs_wb_r_T_107 ? 2'h1 : _state_req_needs_wb_r_T_106; // @[Misc.scala:38:63, :56:20]
wire _state_req_needs_wb_r_T_111 = _state_req_needs_wb_r_T_70 == 4'h7; // @[Misc.scala:56:20]
wire _state_req_needs_wb_r_T_112 = _state_req_needs_wb_r_T_111 | _state_req_needs_wb_r_T_108; // @[Misc.scala:38:9, :56:20]
wire [2:0] _state_req_needs_wb_r_T_113 = _state_req_needs_wb_r_T_111 ? 3'h0 : _state_req_needs_wb_r_T_109; // @[Misc.scala:38:36, :56:20]
wire [1:0] _state_req_needs_wb_r_T_114 = _state_req_needs_wb_r_T_111 ? 2'h1 : _state_req_needs_wb_r_T_110; // @[Misc.scala:38:63, :56:20]
wire _state_req_needs_wb_r_T_115 = _state_req_needs_wb_r_T_70 == 4'h0; // @[Misc.scala:56:20]
wire _state_req_needs_wb_r_T_116 = ~_state_req_needs_wb_r_T_115 & _state_req_needs_wb_r_T_112; // @[Misc.scala:38:9, :56:20]
wire [2:0] _state_req_needs_wb_r_T_117 = _state_req_needs_wb_r_T_115 ? 3'h5 : _state_req_needs_wb_r_T_113; // @[Misc.scala:38:36, :56:20]
wire [1:0] _state_req_needs_wb_r_T_118 = _state_req_needs_wb_r_T_115 ? 2'h0 : _state_req_needs_wb_r_T_114; // @[Misc.scala:38:63, :56:20]
wire _state_req_needs_wb_r_T_119 = _state_req_needs_wb_r_T_70 == 4'h1; // @[Misc.scala:56:20]
wire _state_req_needs_wb_r_T_120 = ~_state_req_needs_wb_r_T_119 & _state_req_needs_wb_r_T_116; // @[Misc.scala:38:9, :56:20]
wire [2:0] _state_req_needs_wb_r_T_121 = _state_req_needs_wb_r_T_119 ? 3'h4 : _state_req_needs_wb_r_T_117; // @[Misc.scala:38:36, :56:20]
wire [1:0] _state_req_needs_wb_r_T_122 = _state_req_needs_wb_r_T_119 ? 2'h1 : _state_req_needs_wb_r_T_118; // @[Misc.scala:38:63, :56:20]
wire _state_req_needs_wb_r_T_123 = _state_req_needs_wb_r_T_70 == 4'h2; // @[Misc.scala:56:20]
wire _state_req_needs_wb_r_T_124 = ~_state_req_needs_wb_r_T_123 & _state_req_needs_wb_r_T_120; // @[Misc.scala:38:9, :56:20]
wire [2:0] _state_req_needs_wb_r_T_125 = _state_req_needs_wb_r_T_123 ? 3'h3 : _state_req_needs_wb_r_T_121; // @[Misc.scala:38:36, :56:20]
wire [1:0] _state_req_needs_wb_r_T_126 = _state_req_needs_wb_r_T_123 ? 2'h2 : _state_req_needs_wb_r_T_122; // @[Misc.scala:38:63, :56:20]
wire _state_req_needs_wb_r_T_127 = _state_req_needs_wb_r_T_70 == 4'h3; // @[Misc.scala:56:20]
wire state_req_needs_wb_r_1_1 = _state_req_needs_wb_r_T_127 | _state_req_needs_wb_r_T_124; // @[Misc.scala:38:9, :56:20]
wire [2:0] state_req_needs_wb_r_2_1 = _state_req_needs_wb_r_T_127 ? 3'h3 : _state_req_needs_wb_r_T_125; // @[Misc.scala:38:36, :56:20]
wire [1:0] state_req_needs_wb_r_3_1 = _state_req_needs_wb_r_T_127 ? 2'h2 : _state_req_needs_wb_r_T_126; // @[Misc.scala:38:63, :56:20]
wire [1:0] state_req_needs_wb_meta_1_state = state_req_needs_wb_r_3_1; // @[Misc.scala:38:63]
wire _state_r_c_cat_T_52 = _state_r_c_cat_T_50 | _state_r_c_cat_T_51; // @[Consts.scala:90:{32,42,49}]
wire _state_r_c_cat_T_54 = _state_r_c_cat_T_52 | _state_r_c_cat_T_53; // @[Consts.scala:90:{42,59,66}]
wire _state_r_c_cat_T_59 = _state_r_c_cat_T_55 | _state_r_c_cat_T_56; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_60 = _state_r_c_cat_T_59 | _state_r_c_cat_T_57; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_61 = _state_r_c_cat_T_60 | _state_r_c_cat_T_58; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_67 = _state_r_c_cat_T_62 | _state_r_c_cat_T_63; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_68 = _state_r_c_cat_T_67 | _state_r_c_cat_T_64; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_69 = _state_r_c_cat_T_68 | _state_r_c_cat_T_65; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_70 = _state_r_c_cat_T_69 | _state_r_c_cat_T_66; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_71 = _state_r_c_cat_T_61 | _state_r_c_cat_T_70; // @[package.scala:81:59]
wire _state_r_c_cat_T_72 = _state_r_c_cat_T_54 | _state_r_c_cat_T_71; // @[Consts.scala:87:44, :90:{59,76}]
wire _state_r_c_cat_T_75 = _state_r_c_cat_T_73 | _state_r_c_cat_T_74; // @[Consts.scala:90:{32,42,49}]
wire _state_r_c_cat_T_77 = _state_r_c_cat_T_75 | _state_r_c_cat_T_76; // @[Consts.scala:90:{42,59,66}]
wire _state_r_c_cat_T_82 = _state_r_c_cat_T_78 | _state_r_c_cat_T_79; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_83 = _state_r_c_cat_T_82 | _state_r_c_cat_T_80; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_84 = _state_r_c_cat_T_83 | _state_r_c_cat_T_81; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_90 = _state_r_c_cat_T_85 | _state_r_c_cat_T_86; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_91 = _state_r_c_cat_T_90 | _state_r_c_cat_T_87; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_92 = _state_r_c_cat_T_91 | _state_r_c_cat_T_88; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_93 = _state_r_c_cat_T_92 | _state_r_c_cat_T_89; // @[package.scala:16:47, :81:59]
wire _state_r_c_cat_T_94 = _state_r_c_cat_T_84 | _state_r_c_cat_T_93; // @[package.scala:81:59]
wire _state_r_c_cat_T_95 = _state_r_c_cat_T_77 | _state_r_c_cat_T_94; // @[Consts.scala:87:44, :90:{59,76}]
wire _state_r_c_cat_T_97 = _state_r_c_cat_T_95 | _state_r_c_cat_T_96; // @[Consts.scala:90:76, :91:{47,54}]
wire _state_r_c_cat_T_99 = _state_r_c_cat_T_97 | _state_r_c_cat_T_98; // @[Consts.scala:91:{47,64,71}]
wire [1:0] state_r_c_1 = {_state_r_c_cat_T_72, _state_r_c_cat_T_99}; // @[Metadata.scala:29:18]
wire [3:0] _state_r_T_59 = {state_r_c_1, io_req_old_meta_coh_state_0}; // @[Metadata.scala:29:18, :58:19]
wire _state_r_T_84 = _state_r_T_59 == 4'hC; // @[Misc.scala:49:20]
wire [1:0] _state_r_T_86 = {1'h0, _state_r_T_84}; // @[Misc.scala:35:36, :49:20]
wire _state_r_T_87 = _state_r_T_59 == 4'hD; // @[Misc.scala:49:20]
wire [1:0] _state_r_T_89 = _state_r_T_87 ? 2'h2 : _state_r_T_86; // @[Misc.scala:35:36, :49:20]
wire _state_r_T_90 = _state_r_T_59 == 4'h4; // @[Misc.scala:49:20]
wire [1:0] _state_r_T_92 = _state_r_T_90 ? 2'h1 : _state_r_T_89; // @[Misc.scala:35:36, :49:20]
wire _state_r_T_93 = _state_r_T_59 == 4'h5; // @[Misc.scala:49:20]
wire [1:0] _state_r_T_95 = _state_r_T_93 ? 2'h2 : _state_r_T_92; // @[Misc.scala:35:36, :49:20]
wire _state_r_T_96 = _state_r_T_59 == 4'h0; // @[Misc.scala:49:20]
wire [1:0] _state_r_T_98 = _state_r_T_96 ? 2'h0 : _state_r_T_95; // @[Misc.scala:35:36, :49:20]
wire _state_r_T_99 = _state_r_T_59 == 4'hE; // @[Misc.scala:49:20]
wire _state_r_T_100 = _state_r_T_99; // @[Misc.scala:35:9, :49:20]
wire [1:0] _state_r_T_101 = _state_r_T_99 ? 2'h3 : _state_r_T_98; // @[Misc.scala:35:36, :49:20]
wire _state_r_T_102 = &_state_r_T_59; // @[Misc.scala:49:20]
wire _state_r_T_103 = _state_r_T_102 | _state_r_T_100; // @[Misc.scala:35:9, :49:20]
wire [1:0] _state_r_T_104 = _state_r_T_102 ? 2'h3 : _state_r_T_101; // @[Misc.scala:35:36, :49:20]
wire _state_r_T_105 = _state_r_T_59 == 4'h6; // @[Misc.scala:49:20]
wire _state_r_T_106 = _state_r_T_105 | _state_r_T_103; // @[Misc.scala:35:9, :49:20]
wire [1:0] _state_r_T_107 = _state_r_T_105 ? 2'h2 : _state_r_T_104; // @[Misc.scala:35:36, :49:20]
wire _state_r_T_108 = _state_r_T_59 == 4'h7; // @[Misc.scala:49:20]
wire _state_r_T_109 = _state_r_T_108 | _state_r_T_106; // @[Misc.scala:35:9, :49:20]
wire [1:0] _state_r_T_110 = _state_r_T_108 ? 2'h3 : _state_r_T_107; // @[Misc.scala:35:36, :49:20]
wire _state_r_T_111 = _state_r_T_59 == 4'h1; // @[Misc.scala:49:20]
wire _state_r_T_112 = _state_r_T_111 | _state_r_T_109; // @[Misc.scala:35:9, :49:20]
wire [1:0] _state_r_T_113 = _state_r_T_111 ? 2'h1 : _state_r_T_110; // @[Misc.scala:35:36, :49:20]
wire _state_r_T_114 = _state_r_T_59 == 4'h2; // @[Misc.scala:49:20]
wire _state_r_T_115 = _state_r_T_114 | _state_r_T_112; // @[Misc.scala:35:9, :49:20]
wire [1:0] _state_r_T_116 = _state_r_T_114 ? 2'h2 : _state_r_T_113; // @[Misc.scala:35:36, :49:20]
wire _state_r_T_117 = _state_r_T_59 == 4'h3; // @[Misc.scala:49:20]
wire state_is_hit_1 = _state_r_T_117 | _state_r_T_115; // @[Misc.scala:35:9, :49:20]
wire [1:0] state_r_2_1 = _state_r_T_117 ? 2'h3 : _state_r_T_116; // @[Misc.scala:35:36, :49:20]
wire [1:0] state_coh_on_hit_1_state = state_r_2_1; // @[Misc.scala:35:36]
wire _state_T_39 = _state_T_37 | _state_T_38; // @[Consts.scala:90:{32,42,49}]
wire _state_T_41 = _state_T_39 | _state_T_40; // @[Consts.scala:90:{42,59,66}]
wire _state_T_46 = _state_T_42 | _state_T_43; // @[package.scala:16:47, :81:59]
wire _state_T_47 = _state_T_46 | _state_T_44; // @[package.scala:16:47, :81:59]
wire _state_T_48 = _state_T_47 | _state_T_45; // @[package.scala:16:47, :81:59]
wire _state_T_54 = _state_T_49 | _state_T_50; // @[package.scala:16:47, :81:59]
wire _state_T_55 = _state_T_54 | _state_T_51; // @[package.scala:16:47, :81:59]
wire _state_T_56 = _state_T_55 | _state_T_52; // @[package.scala:16:47, :81:59]
wire _state_T_57 = _state_T_56 | _state_T_53; // @[package.scala:16:47, :81:59]
wire _state_T_58 = _state_T_48 | _state_T_57; // @[package.scala:81:59]
wire _state_T_59 = _state_T_41 | _state_T_58; // @[Consts.scala:87:44, :90:{59,76}]
wire _state_T_61 = ~_state_T_60; // @[mshrs.scala:220:15]
wire _state_T_62 = ~_state_T_59; // @[Consts.scala:90:76] |
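The long `cond ? value : prev` cascades above (the `_needs_wb_r_T_*`, `_r_T_*` and `_state_r_T_*` wires) are what a fold of two-input muxes over a lookup table elaborates to. A minimal sketch of that Chisel pattern follows; it is written from scratch for illustration and is not the actual rocket-chip Misc.scala helper the annotations point at.

import chisel3._

// Hypothetical helper: fold (key -> value) pairs into a priority-mux chain.
// firtool flattens each Mux into one nested `cond ? value : prev` term,
// which produces wire chains like the ones printed above.
object MuxChainSketch {
  def lookup(sel: UInt, default: UInt, mapping: Seq[(UInt, UInt)]): UInt =
    mapping.foldLeft(default) { case (prev, (key, value)) =>
      Mux(sel === key, value, prev)
    }
}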
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
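// Usage sketch (added for illustration; the identifiers below are hypothetical and not
// part of the original file): ShiftRegInit chains n RegNext stages, each reset to `init`:
//   val stable = ShiftRegInit(in = noisyBit, n = 2, init = false.B, name = Some("deb"))
// elaborates to a 2-deep register chain named deb_0 and deb_1, with deb_0 driving `stable`.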
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
  * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
  * intended to be replaced by the integrator's metastable flop chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unnecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_96( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_152 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
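For context, here is a hedged sketch of the kind of call site that elaborates into a w=1, d=3, i=0 synchronizer like the module above; the module and signal names are made up for illustration and are not taken from the generating design.

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

// Illustrative only: a 1-bit asynchronous input pushed through a 3-deep
// synchronizer chain, matching AsyncResetSynchronizerShiftReg_w1_d3_i0 above.
class IrqSync extends Module {
  val io = IO(new Bundle {
    val rawIrq  = Input(Bool())
    val syncIrq = Output(Bool())
  })
  io.syncIrq := AsyncResetSynchronizerShiftReg(io.rawIrq, sync = 3, init = 0, name = Some("irq_sync"))
}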
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
  * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
  * intended to be replaced by the integrator's metastable flop chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unnecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_89( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
output io_q // @[ShiftReg.scala:36:14]
);
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire io_d = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41]
wire _output_T_1 = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_145 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
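Note that this second variant has no io_d input: its data was tied to the constant 1'h1 and the port was optimized away during elaboration. One plausible, purely illustrative source for such a module is a call that synchronizes a literal; the class and signal names below are assumptions, not the actual generating design.

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

// Illustrative only: synchronizing a constant true.B; after cross-module constant
// propagation the generated synchronizer keeps only clock, reset and io_q, as above.
class AlwaysOnSync extends Module {
  val io = IO(new Bundle { val out = Output(Bool()) })
  io.out := AsyncResetSynchronizerShiftReg(true.B, sync = 3, init = 0, name = Some("const_sync"))
}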
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
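// Usage sketch (added for illustration; `someNode` is hypothetical): per the apply above,
// TLMonitor(enable = true, someNode) binds an ephemeral "monitor" node into someNode under
// EnableMonitors, so a monitor is inserted on that edge; with enable = false the node is
// returned unchanged.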
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID for which a response message " +
"is already pending (not received until the current cycle) for a prior request message " +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the " +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the " +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be the same as a_size of the corresponding request " +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, "'A' and 'D' concurrent, despite minLatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
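// The watchdog in the legalize*Source checks above and below is armed via a plusarg:
// running the simulator with, for example, +tilelink_timeout=100000 kills emulation
// if requests are inflight and no 'A' or 'D' beat fires for that many consecutive
// cycles; the default of 0 disables the check.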
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, "'A' and 'D' concurrent, despite minLatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
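// Worked example of the packing used above (illustrative): with sizeBits = 3,
// a_size_bus_size is 4 and log_a_size_bus_size is 2, so every source owns a 4-bit
// slot of inflight_sizes. An A request from source 2 with size 3 writes
// (3 << 1) | 1 = "b0111" at bit offset 2 << 2 = 8; the D-side lookup shifts right
// by that offset, masks off 4 bits, and drops the low "valid" bit to recover size 3.
// inflight_opcodes, and legalizeCDSource below, use the same scheme with 4-bit slots.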
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, "'C' and 'D' concurrent, despite minLatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
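// Illustrative usage sketch (not part of the original file): gating a two-input
// combiner with DecoupledHelper so that each handshake's ready/valid excludes its
// own signal. The port names inA, inB and out are assumptions for the example.
object DecoupledHelperExample {
  def connect(inA: DecoupledIO[UInt], inB: DecoupledIO[UInt], out: DecoupledIO[UInt]): Unit = {
    val helper = DecoupledHelper(inA.valid, inB.valid, out.ready)
    // fire(exclude) ANDs every condition except the excluded one; the excluded
    // Bool must be the exact object passed above (referential equality).
    inA.ready := helper.fire(inA.valid)
    inB.ready := helper.fire(inB.valid)
    out.valid := helper.fire(out.ready)
    out.bits := inA.bits + inB.bits
  }
}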
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
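// Illustrative usage sketch (not part of the original file): decoding a
// hypothetical opcode into a (legal, beats) pair with MuxTLookup; unmatched keys
// fall through to the default tuple.
object MuxTLookupExample {
  def decode(opcode: UInt): (Bool, UInt) =
    MuxTLookup(opcode, (false.B, 0.U), Seq(
      1.U -> (true.B, 1.U),
      2.U -> (true.B, 4.U)))
}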
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
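// For example, Split("b101100".U, 2) returns the pair ("b1011".U, "b00".U); the
// three- and four-way overloads slice the remaining high bits at each additional
// position.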
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
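// For example, Majority(Seq(a, b, c)) reduces to (a && b) || (a && c) || (b && c):
// with three inputs n is 2, so every 2-element subset is ANDed and the clauses ORed.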
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// (examples written with byte lane 0 leftmost; bit i of the returned UInt covers byte lane i)
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
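// Illustrative usage sketch (not part of the original file): deriving a byte-lane
// mask for an 8-byte beat inside a module. The port names are assumptions for the
// example.
class MaskGenExample extends Module {
  val io = IO(new Bundle {
    val addr   = Input(UInt(3.W))  // byte offset within the beat
    val lgSize = Input(UInt(2.W))  // log2 of the access size in bytes
    val mask   = Output(UInt(8.W))
  })
  io.mask := MaskGen(io.addr, io.lgSize, beatBytes = 8)
}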
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
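// Illustrative usage sketch (not part of the original file): a counter whose
// threshold and watchdog come from plusargs. The names max_count and
// progress_timeout are made up for the example; run with e.g.
// +max_count=100 +progress_timeout=1000.
class PlusArgExample extends Module {
  val io = IO(new Bundle { val done = Output(Bool()) })
  val count = RegInit(0.U(32.W))
  count := count + 1.U
  io.done := count >= PlusArg("max_count", default = 16, docstring = "cycles before done")
  PlusArg.timeout("progress_timeout", docstring = "kill the sim if not done in N cycles")(count)
}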
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
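// For example, Seq(a, b) | Seq(c) pads the shorter operand with false.B and yields
// Seq(a || c, b || false.B), while Seq(a, b) << 1 prepends a false.B element;
// orR and andR reduce an empty Seq to false.B and true.B respectively.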
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
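// Illustrative sketch of a few UInt helpers above (hypothetical 4-bit values):
//   "b1001".U(4.W).rotateRight(1)  // "b1100".U
//   "b0011".U(4.W).sextTo(6)       // "b000011".U (sign bit of the source is 0)
//   3.U.addWrap(4.U, 5)            // 2.U, i.e. (3 + 4) % 5
//   1.U.subWrap(3.U, 5)            // 3.U, i.e. (1 - 3) mod 5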
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
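// Illustrative sketch (hypothetical 5-bit input):
//   leftOR("b00100".U(5.W))   // "b11100".U -- 1s propagate toward the MSB
//   rightOR("b00100".U(5.W))  // "b00111".U -- 1s propagate toward the LSB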
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
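// Minimal usage sketch: wrap a signal so synthesis and constant propagation cannot
// optimize across it (the signal name below is hypothetical):
//   val guarded = OptimizationBarrier(some_wire)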
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
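// Illustrative sketch (plain Scala, hypothetical values): unlike groupBy, the result
// preserves first-appearance order of both keys and elements:
//   groupByIntoSeq(Seq(3, 1, 4, 1, 5))(_ % 2)
//   // Seq(1 -> Seq(3, 1, 1, 5), 0 -> Seq(4))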
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
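// Illustrative sketch: adResponse maps an A-channel opcode to the D-channel message
// expected in reply, e.g. TLMessages.adResponse(TLMessages.Get) is AccessAckData and
// adResponse(TLMessages.PutFullData) is AccessAck.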
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
* (B)ranch: the agent is on an outwards path from the Trunk and may hold a read-only copy.
* (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
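// Illustrative sketch of how the encodings are used: an Acquire that grows NtoT
// (isGrow(NtoT) holds) is answered by a Grant capped toT, and a later Release may
// shrink TtoN (isShrink(TtoN) holds); TtoT/BtoB/NtoN only report the current state.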
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A half-open id range [start, end); may be empty when start == end
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
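// Illustrative sketch (hypothetical range): IdRange(4, 8) covers source ids 4..7, so
// it contains(5) and contains(7) but not contains(8); size is 4, and shift(8) yields
// IdRange(12, 16).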
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
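// Illustrative sketch (hypothetical sizes): TransferSizes(4, 64) accepts any
// power-of-two transfer between 4 and 64 bytes, so contains(16) and containsLg(2)
// (i.e. 4 bytes) hold, while contains(3) and contains(128) do not;
// TransferSizes(4, 16).intersect(TransferSizes(8, 64)) is TransferSizes(8, 16).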
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
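// Illustrative sketch (hypothetical sets): AddressSet(0x1000, 0xfff) is the contiguous
// range 0x1000-0x1fff, so contains(0x1abc) holds and contains(0x2000) does not;
// AddressSet(0x1000, 0xf0f) is the strided set 0x1000-0x100f, 0x1100-0x110f, ...,
// with alignment 0x10 and contiguous == false.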
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
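// Illustrative sketch: BufferParams(2) inserts a depth-2 Queue (one cycle of latency),
// BufferParams.flow is a depth-1 flow-through queue with zero latency, and
// BufferParams.none passes the DecoupledIO through unbuffered.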
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
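// Illustrative sketch (hypothetical transaction): for a 32-byte PutFullData on an
// 8-byte data bus, numBeats is 4 and firstlastHelper tracks the burst as
// first = beat 0, last/done = beat 3, with count stepping 0, 1, 2, 3.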
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_18( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [28:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [3:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [28:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
wire [3:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [8:0] c_first_beats1_decode = 9'h0; // @[Edges.scala:220:59]
wire [8:0] c_first_beats1 = 9'h0; // @[Edges.scala:221:14]
wire [8:0] _c_first_count_T = 9'h0; // @[Edges.scala:234:27]
wire [8:0] c_first_count = 9'h0; // @[Edges.scala:234:25]
wire [8:0] _c_first_counter_T = 9'h0; // @[Edges.scala:236:21]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_39 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_41 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_45 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_47 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_51 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_53 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_57 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_59 = 1'h1; // @[Parameters.scala:57:20]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [8:0] c_first_counter1 = 9'h1FF; // @[Edges.scala:230:28]
wire [9:0] _c_first_counter1_T = 10'h3FF; // @[Edges.scala:230:28]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_first_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_first_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_first_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_first_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_set_wo_ready_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_set_wo_ready_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_opcodes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_opcodes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_sizes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_sizes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_opcodes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_opcodes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_sizes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_sizes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_probe_ack_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_probe_ack_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_probe_ack_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_probe_ack_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _same_cycle_resp_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _same_cycle_resp_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _same_cycle_resp_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _same_cycle_resp_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _same_cycle_resp_WIRE_4_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _same_cycle_resp_WIRE_5_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_wo_ready_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_wo_ready_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_first_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_first_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_first_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_first_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] _c_set_wo_ready_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_set_wo_ready_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_opcodes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_sizes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_opcodes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_sizes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_sizes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_probe_ack_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_probe_ack_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_probe_ack_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_probe_ack_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hFF; // @[Monitor.scala:612:57]
wire [15:0] _c_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hFF; // @[Monitor.scala:724:57]
wire [16:0] _a_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hFF; // @[Monitor.scala:612:57]
wire [16:0] _c_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hFF; // @[Monitor.scala:724:57]
wire [15:0] _a_size_lookup_T_3 = 16'h100; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h100; // @[Monitor.scala:612:51]
wire [15:0] _c_size_lookup_T_3 = 16'h100; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h100; // @[Monitor.scala:724:51]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [1027:0] _c_sizes_set_T_1 = 1028'h0; // @[Monitor.scala:768:52]
wire [9:0] _c_opcodes_set_T = 10'h0; // @[Monitor.scala:767:79]
wire [9:0] _c_sizes_set_T = 10'h0; // @[Monitor.scala:768:77]
wire [1026:0] _c_opcodes_set_T_1 = 1027'h0; // @[Monitor.scala:767:54]
wire [4:0] _c_sizes_set_interm_T_1 = 5'h1; // @[Monitor.scala:766:59]
wire [4:0] c_sizes_set_interm = 5'h0; // @[Monitor.scala:755:40]
wire [4:0] _c_sizes_set_interm_T = 5'h0; // @[Monitor.scala:766:51]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [127:0] _c_set_wo_ready_T = 128'h1; // @[OneHot.scala:58:35]
wire [127:0] _c_set_T = 128'h1; // @[OneHot.scala:58:35]
wire [519:0] c_sizes_set = 520'h0; // @[Monitor.scala:741:34]
wire [259:0] c_opcodes_set = 260'h0; // @[Monitor.scala:740:34]
wire [64:0] c_set = 65'h0; // @[Monitor.scala:738:34]
wire [64:0] c_set_wo_ready = 65'h0; // @[Monitor.scala:739:34]
wire [11:0] _c_first_beats1_decode_T_2 = 12'h0; // @[package.scala:243:46]
wire [11:0] _c_first_beats1_decode_T_1 = 12'hFFF; // @[package.scala:243:76]
wire [26:0] _c_first_beats1_decode_T = 27'hFFF; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_size_lookup_T_2 = 4'h8; // @[Monitor.scala:641:117]
wire [3:0] _d_sizes_clr_T = 4'h8; // @[Monitor.scala:681:48]
wire [3:0] _c_size_lookup_T_2 = 4'h8; // @[Monitor.scala:750:119]
wire [3:0] _d_sizes_clr_T_6 = 4'h8; // @[Monitor.scala:791:48]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [6:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_4 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
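  // A-channel legality: io_in_a_bits_source must fall in one of the client id
  // ranges behind this edge (ids 0-15 as four 4-id blocks, plus 0x10,
  // 0x20-0x22 and 0x40); the per-range hits are OR-reduced into source_ok.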
wire _source_ok_T = io_in_a_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_13 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_19 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_2 = _source_ok_T_1 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_8 = _source_ok_T_7 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_14 = _source_ok_T_13 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_20 = _source_ok_T_19 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31]
wire _source_ok_T_25 = io_in_a_bits_source_0 == 7'h22; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_5 = _source_ok_T_25; // @[Parameters.scala:1138:31]
wire _source_ok_T_26 = io_in_a_bits_source_0 == 7'h21; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_6 = _source_ok_T_26; // @[Parameters.scala:1138:31]
wire _source_ok_T_27 = io_in_a_bits_source_0 == 7'h20; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_7 = _source_ok_T_27; // @[Parameters.scala:1138:31]
wire _source_ok_T_28 = io_in_a_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_8 = _source_ok_T_28; // @[Parameters.scala:1138:31]
wire _source_ok_T_29 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_30 = _source_ok_T_29 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_31 = _source_ok_T_30 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_32 = _source_ok_T_31 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_33 = _source_ok_T_32 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_34 = _source_ok_T_33 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_35 = _source_ok_T_34 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_35 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46]
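  // Alignment check: a transfer of 2^size bytes must have its low `size`
  // address bits clear; is_aligned_mask selects exactly those bits.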
wire [26:0] _GEN = 27'hFFF << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [26:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [26:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [26:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [11:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [28:0] _is_aligned_T = {17'h0, io_in_a_bits_address_0[11:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 29'h0; // @[Edges.scala:21:{16,24}]
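  // Reference byte-lane mask: one-hot-decode the size (mask_sizeOH) and walk
  // the low address bits to build the 8-bit mask this beat should drive
  // (mask-generation tree from Misc.scala); the monitor compares a.mask
  // against it.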
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 4'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_39 = _uncommonBits_T_39[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}]
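  // D-channel legality: the same source-id decode applied to
  // io_in_d_bits_source, so responses must target a known requester id.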
wire _source_ok_T_36 = io_in_d_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_36; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_37 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_43 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_49 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_55 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_38 = _source_ok_T_37 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_40 = _source_ok_T_38; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_42 = _source_ok_T_40; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_42; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_44 = _source_ok_T_43 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_46 = _source_ok_T_44; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_48 = _source_ok_T_46; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_48; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_50 = _source_ok_T_49 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_52 = _source_ok_T_50; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_54 = _source_ok_T_52; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_54; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_56 = _source_ok_T_55 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_58 = _source_ok_T_56; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_60 = _source_ok_T_58; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_4 = _source_ok_T_60; // @[Parameters.scala:1138:31]
wire _source_ok_T_61 = io_in_d_bits_source_0 == 7'h22; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_5 = _source_ok_T_61; // @[Parameters.scala:1138:31]
wire _source_ok_T_62 = io_in_d_bits_source_0 == 7'h21; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_6 = _source_ok_T_62; // @[Parameters.scala:1138:31]
wire _source_ok_T_63 = io_in_d_bits_source_0 == 7'h20; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_7 = _source_ok_T_63; // @[Parameters.scala:1138:31]
wire _source_ok_T_64 = io_in_d_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_8 = _source_ok_T_64; // @[Parameters.scala:1138:31]
wire _source_ok_T_65 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_66 = _source_ok_T_65 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_67 = _source_ok_T_66 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_68 = _source_ok_T_67 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_69 = _source_ok_T_68 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_70 = _source_ok_T_69 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_71 = _source_ok_T_70 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_71 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46]
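  // A-channel burst tracking: a_first_counter counts down the remaining beats
  // and a_first flags the first beat. Only data-carrying A opcodes
  // (opcode[2] == 0) span multiple beats, with the beat count decoded from the
  // size field.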
wire _T_1555 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1555; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1555; // @[Decoupled.scala:51:35]
wire [11:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [8:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] a_first_counter; // @[Edges.scala:229:27]
wire [9:0] _a_first_counter1_T = {1'h0, a_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] a_first_counter1 = _a_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [8:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
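  // The fields of the first A beat are latched so later beats of the same
  // burst can be checked for consistency against them.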
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [3:0] size; // @[Monitor.scala:389:22]
reg [6:0] source; // @[Monitor.scala:390:22]
reg [28:0] address; // @[Monitor.scala:391:22]
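  // D-channel burst tracking, mirroring the A side; only AccessAckData and
  // GrantData (opcode[0] set) carry data and hence multiple beats.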
wire _T_1628 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1628; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1628; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1628; // @[Decoupled.scala:51:35]
wire [26:0] _GEN_0 = 27'hFFF << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [11:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [8:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T = {1'h0, d_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1 = _d_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [3:0] size_1; // @[Monitor.scala:540:22]
reg [6:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
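  // Outstanding-transaction state: one inflight bit per source id (65 ids),
  // plus the recorded opcode (4 bits/id) and size (8 bits/id) packed into
  // inflight_opcodes and inflight_sizes.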
reg [64:0] inflight; // @[Monitor.scala:614:27]
reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [519:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [11:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [8:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] a_first_counter1_1 = _a_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [11:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire [8:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1_1 = _d_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
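  // a_set marks a source id as inflight when the first A beat fires; d_clr
  // clears it when the matching response completes (ReleaseAck excluded), and
  // the *_lookup shifts recover the opcode/size recorded for that id.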
wire [64:0] a_set; // @[Monitor.scala:626:34]
wire [64:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [259:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [519:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [9:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [9:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [9:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [9:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [9:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [259:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [259:0] _a_opcode_lookup_T_6 = {256'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [259:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [7:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [9:0] _GEN_2 = {io_in_d_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :641:65]
wire [9:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65]
wire [9:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_2; // @[Monitor.scala:641:65, :681:99]
wire [9:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65, :750:67]
wire [9:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_2; // @[Monitor.scala:641:65, :791:99]
wire [519:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [519:0] _a_size_lookup_T_6 = {512'h0, _a_size_lookup_T_1[7:0]}; // @[Monitor.scala:641:{40,91}]
wire [519:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[519:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[7:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [4:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [127:0] _GEN_3 = 128'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_3; // @[OneHot.scala:58:35]
wire [127:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_3; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1481 = _T_1555 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_1481 ? _a_set_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_1481 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [4:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [4:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_1481 ? _a_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [9:0] _a_opcodes_set_T = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [1026:0] _a_opcodes_set_T_1 = {1023'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_1481 ? _a_opcodes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [9:0] _a_sizes_set_T = {io_in_a_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :660:77]
wire [1027:0] _a_sizes_set_T_1 = {1023'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_1481 ? _a_sizes_set_T_1[519:0] : 520'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [64:0] d_clr; // @[Monitor.scala:664:34]
wire [64:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [259:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [519:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_1527 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [127:0] _GEN_5 = 128'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1527 & ~d_release_ack ? _d_clr_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1496 = _T_1628 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_1496 ? _d_clr_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_5 = 1039'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_1496 ? _d_opcodes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [1038:0] _d_sizes_clr_T_5 = 1039'hFF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_1496 ? _d_sizes_clr_T_5[519:0] : 520'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [64:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [64:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [64:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [259:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [259:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [259:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [519:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [519:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [519:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
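// Bookkeeping summary: inflight holds one bit per source ID (65 entries), while
// inflight_opcodes packs 4 bits per source (hence the {source, 2'h0} indices) and
// inflight_sizes packs 8 bits per source (hence {source, 3'h0}); an accepted first
// A-channel beat sets an entry, and a first D-channel beat other than ReleaseAck
// (opcode 6) clears it.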
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [64:0] inflight_1; // @[Monitor.scala:726:35]
wire [64:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [259:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [259:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [519:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [519:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [11:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[11:3]; // @[package.scala:243:46]
wire [8:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1_2 = _d_first_counter1_T_2[8:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [7:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [259:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [259:0] _c_opcode_lookup_T_6 = {256'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [259:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [519:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [519:0] _c_size_lookup_T_6 = {512'h0, _c_size_lookup_T_1[7:0]}; // @[Monitor.scala:750:{42,93}]
wire [519:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[519:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[7:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [64:0] d_clr_1; // @[Monitor.scala:774:34]
wire [64:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [259:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [519:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1599 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1599 & d_release_ack_1 ? _d_clr_wo_ready_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1581 = _T_1628 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1581 ? _d_clr_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_11 = 1039'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1581 ? _d_opcodes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [1038:0] _d_sizes_clr_T_11 = 1039'hFF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1581 ? _d_sizes_clr_T_11[519:0] : 520'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 7'h0; // @[Monitor.scala:36:7, :795:113]
wire [64:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [64:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [259:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [259:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [519:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [519:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File primitives.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object lowMask
{
def apply(in: UInt, topBound: BigInt, bottomBound: BigInt): UInt =
{
require(topBound != bottomBound)
val numInVals = BigInt(1)<<in.getWidth
if (topBound < bottomBound) {
lowMask(~in, numInVals - 1 - topBound, numInVals - 1 - bottomBound)
} else if (numInVals > 64 /* Empirical */) {
// For simulation performance, we should avoid generating
// extremely wide shifters, so we divide and conquer.
// Empirically, this does not impact synthesis QoR.
val mid = numInVals / 2
val msb = in(in.getWidth - 1)
val lsbs = in(in.getWidth - 2, 0)
if (mid < topBound) {
if (mid <= bottomBound) {
Mux(msb,
lowMask(lsbs, topBound - mid, bottomBound - mid),
0.U
)
} else {
Mux(msb,
lowMask(lsbs, topBound - mid, 0) ## ((BigInt(1)<<(mid - bottomBound).toInt) - 1).U,
lowMask(lsbs, mid, bottomBound)
)
}
} else {
~Mux(msb, 0.U, ~lowMask(lsbs, topBound, bottomBound))
}
} else {
val shift = (BigInt(-1)<<numInVals.toInt).S>>in
Reverse(
shift(
(numInVals - 1 - bottomBound).toInt,
(numInVals - topBound).toInt
)
)
}
}
}
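// Worked example (topBound > bottomBound case): the result is a thermometer mask of width
// topBound - bottomBound whose bit k is set iff in > bottomBound + k. For a 2-bit `in`,
// lowMask(in, 4, 0) therefore evaluates to 4'b0000, 4'b0001, 4'b0011, 4'b0111 for
// in = 0, 1, 2, 3.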
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object countLeadingZeros
{
def apply(in: UInt): UInt = PriorityEncoder(in.asBools.reverse)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy2
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 1)>>1
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 2 + 1, ix * 2).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 2).orR
reducedVec.asUInt
}
}
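// Worked example: for a 5-bit input the reduced width is 3 and each output bit ORs one
// 2-bit slice (the top bit covers the leftover single bit), e.g.
// orReduceBy2(5'b10010) = 3'b101.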
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy4
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 3)>>2
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 4 + 3, ix * 4).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 4).orR
reducedVec.asUInt
}
}
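// Note: these OR-reduce helpers are paired with lowMask in MulAddRecFN.scala below
// (reduced4CExtra, CDom_reduced4SigExtra, notCDom_reduced4SigExtra) so that the sticky
// bit of a shifted-out significand range can be computed one reduced group at a time
// rather than bit by bit.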
File MulAddRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN_interIo(expWidth: Int, sigWidth: Int) extends Bundle
{
//*** ENCODE SOME OF THESE CASES IN FEWER BITS?:
val isSigNaNAny = Bool()
val isNaNAOrB = Bool()
val isInfA = Bool()
val isZeroA = Bool()
val isInfB = Bool()
val isZeroB = Bool()
val signProd = Bool()
val isNaNC = Bool()
val isInfC = Bool()
val isZeroC = Bool()
val sExpSum = SInt((expWidth + 2).W)
val doSubMags = Bool()
val CIsDominant = Bool()
val CDom_CAlignDist = UInt(log2Ceil(sigWidth + 1).W)
val highAlignedSigC = UInt((sigWidth + 2).W)
val bit0AlignedSigC = UInt(1.W)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_preMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_preMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val mulAddA = Output(UInt(sigWidth.W))
val mulAddB = Output(UInt(sigWidth.W))
val mulAddC = Output(UInt((sigWidth * 2).W))
val toPostMul = Output(new MulAddRecFN_interIo(expWidth, sigWidth))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
//*** POSSIBLE TO REDUCE THIS BY 1 OR 2 BITS? (CURRENTLY 2 BITS BETWEEN
//*** UNSHIFTED C AND PRODUCT):
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawA = rawFloatFromRecFN(expWidth, sigWidth, io.a)
val rawB = rawFloatFromRecFN(expWidth, sigWidth, io.b)
val rawC = rawFloatFromRecFN(expWidth, sigWidth, io.c)
val signProd = rawA.sign ^ rawB.sign ^ io.op(1)
//*** REVIEW THE BIAS FOR 'sExpAlignedProd':
val sExpAlignedProd =
rawA.sExp +& rawB.sExp + (-(BigInt(1)<<expWidth) + sigWidth + 3).S
val doSubMags = signProd ^ rawC.sign ^ io.op(0)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sNatCAlignDist = sExpAlignedProd - rawC.sExp
val posNatCAlignDist = sNatCAlignDist(expWidth + 1, 0)
val isMinCAlign = rawA.isZero || rawB.isZero || (sNatCAlignDist < 0.S)
val CIsDominant =
! rawC.isZero && (isMinCAlign || (posNatCAlignDist <= sigWidth.U))
val CAlignDist =
Mux(isMinCAlign,
0.U,
Mux(posNatCAlignDist < (sigSumWidth - 1).U,
posNatCAlignDist(log2Ceil(sigSumWidth) - 1, 0),
(sigSumWidth - 1).U
)
)
val mainAlignedSigC =
(Mux(doSubMags, ~rawC.sig, rawC.sig) ## Fill(sigSumWidth - sigWidth + 2, doSubMags)).asSInt>>CAlignDist
val reduced4CExtra =
(orReduceBy4(rawC.sig<<((sigSumWidth - sigWidth - 1) & 3)) &
lowMask(
CAlignDist>>2,
//*** NOT NEEDED?:
// (sigSumWidth + 2)>>2,
(sigSumWidth - 1)>>2,
(sigSumWidth - sigWidth - 1)>>2
)
).orR
val alignedSigC =
Cat(mainAlignedSigC>>3,
Mux(doSubMags,
mainAlignedSigC(2, 0).andR && ! reduced4CExtra,
mainAlignedSigC(2, 0).orR || reduced4CExtra
)
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
io.mulAddA := rawA.sig
io.mulAddB := rawB.sig
io.mulAddC := alignedSigC(sigWidth * 2, 1)
io.toPostMul.isSigNaNAny :=
isSigNaNRawFloat(rawA) || isSigNaNRawFloat(rawB) ||
isSigNaNRawFloat(rawC)
io.toPostMul.isNaNAOrB := rawA.isNaN || rawB.isNaN
io.toPostMul.isInfA := rawA.isInf
io.toPostMul.isZeroA := rawA.isZero
io.toPostMul.isInfB := rawB.isInf
io.toPostMul.isZeroB := rawB.isZero
io.toPostMul.signProd := signProd
io.toPostMul.isNaNC := rawC.isNaN
io.toPostMul.isInfC := rawC.isInf
io.toPostMul.isZeroC := rawC.isZero
io.toPostMul.sExpSum :=
Mux(CIsDominant, rawC.sExp, sExpAlignedProd - sigWidth.S)
io.toPostMul.doSubMags := doSubMags
io.toPostMul.CIsDominant := CIsDominant
io.toPostMul.CDom_CAlignDist := CAlignDist(log2Ceil(sigWidth + 1) - 1, 0)
io.toPostMul.highAlignedSigC :=
alignedSigC(sigSumWidth - 1, sigWidth * 2 + 1)
io.toPostMul.bit0AlignedSigC := alignedSigC(0)
}
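// Note that this stage exports the raw multiplier operands (mulAddA, mulAddB) and the
// aligned addend (mulAddC) instead of multiplying internally: the caller forms
// mulAddA * mulAddB +& mulAddC itself and hands the sum to MulAddRecFNToRaw_postMul via
// io.mulAddResult, exactly as the MulAddRecFN wrapper does at the end of this file.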
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_postMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_postMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val fromPreMul = Input(new MulAddRecFN_interIo(expWidth, sigWidth))
val mulAddResult = Input(UInt((sigWidth * 2 + 1).W))
val roundingMode = Input(UInt(3.W))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigSumWidth = sigWidth * 3 + 3
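// e.g. for the e8_s24 (single-precision) instance emitted below, sigWidth = 24 gives
// sigSumWidth = 75: the (sigWidth + 2)-bit high-aligned C, the 2*sigWidth-bit product
// field, and the bottom aligned-C bit (26 + 48 + 1).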
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_min = (io.roundingMode === round_min)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val opSignC = io.fromPreMul.signProd ^ io.fromPreMul.doSubMags
val sigSum =
Cat(Mux(io.mulAddResult(sigWidth * 2),
io.fromPreMul.highAlignedSigC + 1.U,
io.fromPreMul.highAlignedSigC
),
io.mulAddResult(sigWidth * 2 - 1, 0),
io.fromPreMul.bit0AlignedSigC
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val CDom_sign = opSignC
val CDom_sExp = io.fromPreMul.sExpSum - io.fromPreMul.doSubMags.zext
val CDom_absSigSum =
Mux(io.fromPreMul.doSubMags,
~sigSum(sigSumWidth - 1, sigWidth + 1),
0.U(1.W) ##
//*** IF GAP IS REDUCED TO 1 BIT, MUST REDUCE THIS COMPONENT TO 1 BIT TOO:
io.fromPreMul.highAlignedSigC(sigWidth + 1, sigWidth) ##
sigSum(sigSumWidth - 3, sigWidth + 2)
)
val CDom_absSigSumExtra =
Mux(io.fromPreMul.doSubMags,
(~sigSum(sigWidth, 1)).orR,
sigSum(sigWidth + 1, 1).orR
)
val CDom_mainSig =
(CDom_absSigSum<<io.fromPreMul.CDom_CAlignDist)(
sigWidth * 2 + 1, sigWidth - 3)
val CDom_reduced4SigExtra =
(orReduceBy4(CDom_absSigSum(sigWidth - 1, 0)<<(~sigWidth & 3)) &
lowMask(io.fromPreMul.CDom_CAlignDist>>2, 0, sigWidth>>2)).orR
val CDom_sig =
Cat(CDom_mainSig>>3,
CDom_mainSig(2, 0).orR || CDom_reduced4SigExtra ||
CDom_absSigSumExtra
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notCDom_signSigSum = sigSum(sigWidth * 2 + 3)
val notCDom_absSigSum =
Mux(notCDom_signSigSum,
~sigSum(sigWidth * 2 + 2, 0),
sigSum(sigWidth * 2 + 2, 0) + io.fromPreMul.doSubMags
)
val notCDom_reduced2AbsSigSum = orReduceBy2(notCDom_absSigSum)
val notCDom_normDistReduced2 = countLeadingZeros(notCDom_reduced2AbsSigSum)
val notCDom_nearNormDist = notCDom_normDistReduced2<<1
val notCDom_sExp = io.fromPreMul.sExpSum - notCDom_nearNormDist.asUInt.zext
val notCDom_mainSig =
(notCDom_absSigSum<<notCDom_nearNormDist)(
sigWidth * 2 + 3, sigWidth - 1)
val notCDom_reduced4SigExtra =
(orReduceBy2(
notCDom_reduced2AbsSigSum(sigWidth>>1, 0)<<((sigWidth>>1) & 1)) &
lowMask(notCDom_normDistReduced2>>1, 0, (sigWidth + 2)>>2)
).orR
val notCDom_sig =
Cat(notCDom_mainSig>>3,
notCDom_mainSig(2, 0).orR || notCDom_reduced4SigExtra
)
val notCDom_completeCancellation =
(notCDom_sig(sigWidth + 2, sigWidth + 1) === 0.U)
val notCDom_sign =
Mux(notCDom_completeCancellation,
roundingMode_min,
io.fromPreMul.signProd ^ notCDom_signSigSum
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notNaN_isInfProd = io.fromPreMul.isInfA || io.fromPreMul.isInfB
val notNaN_isInfOut = notNaN_isInfProd || io.fromPreMul.isInfC
val notNaN_addZeros =
(io.fromPreMul.isZeroA || io.fromPreMul.isZeroB) &&
io.fromPreMul.isZeroC
io.invalidExc :=
io.fromPreMul.isSigNaNAny ||
(io.fromPreMul.isInfA && io.fromPreMul.isZeroB) ||
(io.fromPreMul.isZeroA && io.fromPreMul.isInfB) ||
(! io.fromPreMul.isNaNAOrB &&
(io.fromPreMul.isInfA || io.fromPreMul.isInfB) &&
io.fromPreMul.isInfC &&
io.fromPreMul.doSubMags)
io.rawOut.isNaN := io.fromPreMul.isNaNAOrB || io.fromPreMul.isNaNC
io.rawOut.isInf := notNaN_isInfOut
//*** IMPROVE?:
io.rawOut.isZero :=
notNaN_addZeros ||
(! io.fromPreMul.CIsDominant && notCDom_completeCancellation)
io.rawOut.sign :=
(notNaN_isInfProd && io.fromPreMul.signProd) ||
(io.fromPreMul.isInfC && opSignC) ||
(notNaN_addZeros && ! roundingMode_min &&
io.fromPreMul.signProd && opSignC) ||
(notNaN_addZeros && roundingMode_min &&
(io.fromPreMul.signProd || opSignC)) ||
(! notNaN_isInfOut && ! notNaN_addZeros &&
Mux(io.fromPreMul.CIsDominant, CDom_sign, notCDom_sign))
io.rawOut.sExp := Mux(io.fromPreMul.CIsDominant, CDom_sExp, notCDom_sExp)
io.rawOut.sig := Mux(io.fromPreMul.CIsDominant, CDom_sig, notCDom_sig)
}
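// Summary of the two datapaths above: when C dominates (CIsDominant), the exponent is
// taken from C (CDom_sExp) and the sum is simply re-aligned by CDom_CAlignDist; otherwise
// the near path counts leading zeros on the 2:1-reduced sum and shifts by twice that
// count, an even shift that normalizes to within one bit, with complete cancellation
// detected separately so the result sign can follow the rounding mode.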
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulAddRecFNToRaw_preMul =
Module(new MulAddRecFNToRaw_preMul(expWidth, sigWidth))
val mulAddRecFNToRaw_postMul =
Module(new MulAddRecFNToRaw_postMul(expWidth, sigWidth))
mulAddRecFNToRaw_preMul.io.op := io.op
mulAddRecFNToRaw_preMul.io.a := io.a
mulAddRecFNToRaw_preMul.io.b := io.b
mulAddRecFNToRaw_preMul.io.c := io.c
val mulAddResult =
(mulAddRecFNToRaw_preMul.io.mulAddA *
mulAddRecFNToRaw_preMul.io.mulAddB) +&
mulAddRecFNToRaw_preMul.io.mulAddC
mulAddRecFNToRaw_postMul.io.fromPreMul :=
mulAddRecFNToRaw_preMul.io.toPostMul
mulAddRecFNToRaw_postMul.io.mulAddResult := mulAddResult
mulAddRecFNToRaw_postMul.io.roundingMode := io.roundingMode
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN =
Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
roundRawFNToRecFN.io.invalidExc := mulAddRecFNToRaw_postMul.io.invalidExc
roundRawFNToRecFN.io.infiniteExc := false.B
roundRawFNToRecFN.io.in := mulAddRecFNToRaw_postMul.io.rawOut
roundRawFNToRecFN.io.roundingMode := io.roundingMode
roundRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
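A minimal usage sketch of the combinational wrapper above (not from the original sources: the wrapper name FmaSingle is illustrative, the operands are assumed to already be in the 33-bit recoded recFN format, and round_near_even / tininess_afterRounding are taken from hardfloat's consts object):
import chisel3._
import hardfloat._
import hardfloat.consts._
class FmaSingle extends Module {
  val io = IO(new Bundle {
    val a = Input(UInt(33.W))      // recoded (recFN) single-precision operands
    val b = Input(UInt(33.W))
    val c = Input(UInt(33.W))
    val out = Output(UInt(33.W))   // recoded result
    val flags = Output(UInt(5.W))  // IEEE exception flags
  })
  val fma = Module(new MulAddRecFN(8, 24))
  fma.io.op := 0.U                 // op(1) negates the product, op(0) negates C; 0 = a*b + c
  fma.io.a := io.a
  fma.io.b := io.b
  fma.io.c := io.c
  fma.io.roundingMode := round_near_even
  fma.io.detectTininess := tininess_afterRounding
  io.out := fma.io.out
  io.flags := fma.io.exceptionFlags
}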
| module MulAddRecFNToRaw_postMul_e8_s24_71( // @[MulAddRecFN.scala:169:7]
input io_fromPreMul_isSigNaNAny, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_isNaNAOrB, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_isInfA, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_isZeroA, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_isInfB, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_isZeroB, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_signProd, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_isNaNC, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_isInfC, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_isZeroC, // @[MulAddRecFN.scala:172:16]
input [9:0] io_fromPreMul_sExpSum, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_doSubMags, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_CIsDominant, // @[MulAddRecFN.scala:172:16]
input [4:0] io_fromPreMul_CDom_CAlignDist, // @[MulAddRecFN.scala:172:16]
input [25:0] io_fromPreMul_highAlignedSigC, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_bit0AlignedSigC, // @[MulAddRecFN.scala:172:16]
input [48:0] io_mulAddResult, // @[MulAddRecFN.scala:172:16]
output io_invalidExc, // @[MulAddRecFN.scala:172:16]
output io_rawOut_isNaN, // @[MulAddRecFN.scala:172:16]
output io_rawOut_isInf, // @[MulAddRecFN.scala:172:16]
output io_rawOut_isZero, // @[MulAddRecFN.scala:172:16]
output io_rawOut_sign, // @[MulAddRecFN.scala:172:16]
output [9:0] io_rawOut_sExp, // @[MulAddRecFN.scala:172:16]
output [26:0] io_rawOut_sig // @[MulAddRecFN.scala:172:16]
);
wire io_fromPreMul_isSigNaNAny_0 = io_fromPreMul_isSigNaNAny; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isNaNAOrB_0 = io_fromPreMul_isNaNAOrB; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isInfA_0 = io_fromPreMul_isInfA; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isZeroA_0 = io_fromPreMul_isZeroA; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isInfB_0 = io_fromPreMul_isInfB; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isZeroB_0 = io_fromPreMul_isZeroB; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_signProd_0 = io_fromPreMul_signProd; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isNaNC_0 = io_fromPreMul_isNaNC; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isInfC_0 = io_fromPreMul_isInfC; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isZeroC_0 = io_fromPreMul_isZeroC; // @[MulAddRecFN.scala:169:7]
wire [9:0] io_fromPreMul_sExpSum_0 = io_fromPreMul_sExpSum; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_doSubMags_0 = io_fromPreMul_doSubMags; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_CIsDominant_0 = io_fromPreMul_CIsDominant; // @[MulAddRecFN.scala:169:7]
wire [4:0] io_fromPreMul_CDom_CAlignDist_0 = io_fromPreMul_CDom_CAlignDist; // @[MulAddRecFN.scala:169:7]
wire [25:0] io_fromPreMul_highAlignedSigC_0 = io_fromPreMul_highAlignedSigC; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_bit0AlignedSigC_0 = io_fromPreMul_bit0AlignedSigC; // @[MulAddRecFN.scala:169:7]
wire [48:0] io_mulAddResult_0 = io_mulAddResult; // @[MulAddRecFN.scala:169:7]
wire _io_rawOut_sign_T_3 = 1'h1; // @[MulAddRecFN.scala:287:29]
wire roundingMode_min = 1'h0; // @[MulAddRecFN.scala:186:45]
wire _io_rawOut_sign_T_8 = 1'h0; // @[MulAddRecFN.scala:289:26]
wire _io_rawOut_sign_T_10 = 1'h0; // @[MulAddRecFN.scala:289:46]
wire [2:0] io_roundingMode = 3'h0; // @[MulAddRecFN.scala:169:7, :172:16]
wire _io_invalidExc_T_9; // @[MulAddRecFN.scala:273:57]
wire _io_rawOut_isNaN_T; // @[MulAddRecFN.scala:278:48]
wire notNaN_isInfOut; // @[MulAddRecFN.scala:265:44]
wire _io_rawOut_isZero_T_2; // @[MulAddRecFN.scala:282:25]
wire _io_rawOut_sign_T_17; // @[MulAddRecFN.scala:290:50]
wire [9:0] _io_rawOut_sExp_T; // @[MulAddRecFN.scala:293:26]
wire [26:0] _io_rawOut_sig_T; // @[MulAddRecFN.scala:294:25]
wire io_rawOut_isNaN_0; // @[MulAddRecFN.scala:169:7]
wire io_rawOut_isInf_0; // @[MulAddRecFN.scala:169:7]
wire io_rawOut_isZero_0; // @[MulAddRecFN.scala:169:7]
wire io_rawOut_sign_0; // @[MulAddRecFN.scala:169:7]
wire [9:0] io_rawOut_sExp_0; // @[MulAddRecFN.scala:169:7]
wire [26:0] io_rawOut_sig_0; // @[MulAddRecFN.scala:169:7]
wire io_invalidExc_0; // @[MulAddRecFN.scala:169:7]
wire opSignC = io_fromPreMul_signProd_0 ^ io_fromPreMul_doSubMags_0; // @[MulAddRecFN.scala:169:7, :190:42]
wire _sigSum_T = io_mulAddResult_0[48]; // @[MulAddRecFN.scala:169:7, :192:32]
wire [26:0] _sigSum_T_1 = {1'h0, io_fromPreMul_highAlignedSigC_0} + 27'h1; // @[MulAddRecFN.scala:169:7, :193:47]
wire [25:0] _sigSum_T_2 = _sigSum_T_1[25:0]; // @[MulAddRecFN.scala:193:47]
wire [25:0] _sigSum_T_3 = _sigSum_T ? _sigSum_T_2 : io_fromPreMul_highAlignedSigC_0; // @[MulAddRecFN.scala:169:7, :192:{16,32}, :193:47]
wire [47:0] _sigSum_T_4 = io_mulAddResult_0[47:0]; // @[MulAddRecFN.scala:169:7, :196:28]
wire [73:0] sigSum_hi = {_sigSum_T_3, _sigSum_T_4}; // @[MulAddRecFN.scala:192:{12,16}, :196:28]
wire [74:0] sigSum = {sigSum_hi, io_fromPreMul_bit0AlignedSigC_0}; // @[MulAddRecFN.scala:169:7, :192:12]
wire [1:0] _CDom_sExp_T = {1'h0, io_fromPreMul_doSubMags_0}; // @[MulAddRecFN.scala:169:7, :203:69]
wire [10:0] _GEN = {io_fromPreMul_sExpSum_0[9], io_fromPreMul_sExpSum_0}; // @[MulAddRecFN.scala:169:7, :203:43]
wire [10:0] _CDom_sExp_T_1 = _GEN - {{9{_CDom_sExp_T[1]}}, _CDom_sExp_T}; // @[MulAddRecFN.scala:203:{43,69}]
wire [9:0] _CDom_sExp_T_2 = _CDom_sExp_T_1[9:0]; // @[MulAddRecFN.scala:203:43]
wire [9:0] CDom_sExp = _CDom_sExp_T_2; // @[MulAddRecFN.scala:203:43]
wire [49:0] _CDom_absSigSum_T = sigSum[74:25]; // @[MulAddRecFN.scala:192:12, :206:20]
wire [49:0] _CDom_absSigSum_T_1 = ~_CDom_absSigSum_T; // @[MulAddRecFN.scala:206:{13,20}]
wire [1:0] _CDom_absSigSum_T_2 = io_fromPreMul_highAlignedSigC_0[25:24]; // @[MulAddRecFN.scala:169:7, :209:46]
wire [2:0] _CDom_absSigSum_T_3 = {1'h0, _CDom_absSigSum_T_2}; // @[MulAddRecFN.scala:207:22, :209:46]
wire [46:0] _CDom_absSigSum_T_4 = sigSum[72:26]; // @[MulAddRecFN.scala:192:12, :210:23]
wire [49:0] _CDom_absSigSum_T_5 = {_CDom_absSigSum_T_3, _CDom_absSigSum_T_4}; // @[MulAddRecFN.scala:207:22, :209:71, :210:23]
wire [49:0] CDom_absSigSum = io_fromPreMul_doSubMags_0 ? _CDom_absSigSum_T_1 : _CDom_absSigSum_T_5; // @[MulAddRecFN.scala:169:7, :205:12, :206:13, :209:71]
wire [23:0] _CDom_absSigSumExtra_T = sigSum[24:1]; // @[MulAddRecFN.scala:192:12, :215:21]
wire [23:0] _CDom_absSigSumExtra_T_1 = ~_CDom_absSigSumExtra_T; // @[MulAddRecFN.scala:215:{14,21}]
wire _CDom_absSigSumExtra_T_2 = |_CDom_absSigSumExtra_T_1; // @[MulAddRecFN.scala:215:{14,36}]
wire [24:0] _CDom_absSigSumExtra_T_3 = sigSum[25:1]; // @[MulAddRecFN.scala:192:12, :216:19]
wire _CDom_absSigSumExtra_T_4 = |_CDom_absSigSumExtra_T_3; // @[MulAddRecFN.scala:216:{19,37}]
wire CDom_absSigSumExtra = io_fromPreMul_doSubMags_0 ? _CDom_absSigSumExtra_T_2 : _CDom_absSigSumExtra_T_4; // @[MulAddRecFN.scala:169:7, :214:12, :215:36, :216:37]
wire [80:0] _CDom_mainSig_T = {31'h0, CDom_absSigSum} << io_fromPreMul_CDom_CAlignDist_0; // @[MulAddRecFN.scala:169:7, :205:12, :219:24]
wire [28:0] CDom_mainSig = _CDom_mainSig_T[49:21]; // @[MulAddRecFN.scala:219:{24,56}]
wire [23:0] _CDom_reduced4SigExtra_T = CDom_absSigSum[23:0]; // @[MulAddRecFN.scala:205:12, :222:36]
wire [26:0] _CDom_reduced4SigExtra_T_1 = {_CDom_reduced4SigExtra_T, 3'h0}; // @[MulAddRecFN.scala:169:7, :172:16, :222:{36,53}]
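// The wires below flatten orReduceBy4 over the 27-bit {CDom_absSigSum[23:0], 3'b000}:
// six OR-reduced 4-bit groups plus a final 3-bit group, feeding the CDom sticky-bit term.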
wire _CDom_reduced4SigExtra_reducedVec_0_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_1_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_2_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_3_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_4_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_5_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_6_T_1; // @[primitives.scala:123:57]
wire CDom_reduced4SigExtra_reducedVec_0; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_1; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_2; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_3; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_4; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_5; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_6; // @[primitives.scala:118:30]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_0_T = _CDom_reduced4SigExtra_T_1[3:0]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_0_T_1 = |_CDom_reduced4SigExtra_reducedVec_0_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_0 = _CDom_reduced4SigExtra_reducedVec_0_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_1_T = _CDom_reduced4SigExtra_T_1[7:4]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_1_T_1 = |_CDom_reduced4SigExtra_reducedVec_1_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_1 = _CDom_reduced4SigExtra_reducedVec_1_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_2_T = _CDom_reduced4SigExtra_T_1[11:8]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_2_T_1 = |_CDom_reduced4SigExtra_reducedVec_2_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_2 = _CDom_reduced4SigExtra_reducedVec_2_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_3_T = _CDom_reduced4SigExtra_T_1[15:12]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_3_T_1 = |_CDom_reduced4SigExtra_reducedVec_3_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_3 = _CDom_reduced4SigExtra_reducedVec_3_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_4_T = _CDom_reduced4SigExtra_T_1[19:16]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_4_T_1 = |_CDom_reduced4SigExtra_reducedVec_4_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_4 = _CDom_reduced4SigExtra_reducedVec_4_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_5_T = _CDom_reduced4SigExtra_T_1[23:20]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_5_T_1 = |_CDom_reduced4SigExtra_reducedVec_5_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_5 = _CDom_reduced4SigExtra_reducedVec_5_T_1; // @[primitives.scala:118:30, :120:54]
wire [2:0] _CDom_reduced4SigExtra_reducedVec_6_T = _CDom_reduced4SigExtra_T_1[26:24]; // @[primitives.scala:123:15]
assign _CDom_reduced4SigExtra_reducedVec_6_T_1 = |_CDom_reduced4SigExtra_reducedVec_6_T; // @[primitives.scala:123:{15,57}]
assign CDom_reduced4SigExtra_reducedVec_6 = _CDom_reduced4SigExtra_reducedVec_6_T_1; // @[primitives.scala:118:30, :123:57]
wire [1:0] CDom_reduced4SigExtra_lo_hi = {CDom_reduced4SigExtra_reducedVec_2, CDom_reduced4SigExtra_reducedVec_1}; // @[primitives.scala:118:30, :124:20]
wire [2:0] CDom_reduced4SigExtra_lo = {CDom_reduced4SigExtra_lo_hi, CDom_reduced4SigExtra_reducedVec_0}; // @[primitives.scala:118:30, :124:20]
wire [1:0] CDom_reduced4SigExtra_hi_lo = {CDom_reduced4SigExtra_reducedVec_4, CDom_reduced4SigExtra_reducedVec_3}; // @[primitives.scala:118:30, :124:20]
wire [1:0] CDom_reduced4SigExtra_hi_hi = {CDom_reduced4SigExtra_reducedVec_6, CDom_reduced4SigExtra_reducedVec_5}; // @[primitives.scala:118:30, :124:20]
wire [3:0] CDom_reduced4SigExtra_hi = {CDom_reduced4SigExtra_hi_hi, CDom_reduced4SigExtra_hi_lo}; // @[primitives.scala:124:20]
wire [6:0] _CDom_reduced4SigExtra_T_2 = {CDom_reduced4SigExtra_hi, CDom_reduced4SigExtra_lo}; // @[primitives.scala:124:20]
wire [2:0] _CDom_reduced4SigExtra_T_3 = io_fromPreMul_CDom_CAlignDist_0[4:2]; // @[MulAddRecFN.scala:169:7, :223:51]
wire [2:0] _CDom_reduced4SigExtra_T_4 = ~_CDom_reduced4SigExtra_T_3; // @[primitives.scala:52:21]
wire [8:0] CDom_reduced4SigExtra_shift = $signed(9'sh100 >>> _CDom_reduced4SigExtra_T_4); // @[primitives.scala:52:21, :76:56]
wire [5:0] _CDom_reduced4SigExtra_T_5 = CDom_reduced4SigExtra_shift[6:1]; // @[primitives.scala:76:56, :78:22]
wire [3:0] _CDom_reduced4SigExtra_T_6 = _CDom_reduced4SigExtra_T_5[3:0]; // @[primitives.scala:77:20, :78:22]
wire [1:0] _CDom_reduced4SigExtra_T_7 = _CDom_reduced4SigExtra_T_6[1:0]; // @[primitives.scala:77:20]
wire _CDom_reduced4SigExtra_T_8 = _CDom_reduced4SigExtra_T_7[0]; // @[primitives.scala:77:20]
wire _CDom_reduced4SigExtra_T_9 = _CDom_reduced4SigExtra_T_7[1]; // @[primitives.scala:77:20]
wire [1:0] _CDom_reduced4SigExtra_T_10 = {_CDom_reduced4SigExtra_T_8, _CDom_reduced4SigExtra_T_9}; // @[primitives.scala:77:20]
wire [1:0] _CDom_reduced4SigExtra_T_11 = _CDom_reduced4SigExtra_T_6[3:2]; // @[primitives.scala:77:20]
wire _CDom_reduced4SigExtra_T_12 = _CDom_reduced4SigExtra_T_11[0]; // @[primitives.scala:77:20]
wire _CDom_reduced4SigExtra_T_13 = _CDom_reduced4SigExtra_T_11[1]; // @[primitives.scala:77:20]
wire [1:0] _CDom_reduced4SigExtra_T_14 = {_CDom_reduced4SigExtra_T_12, _CDom_reduced4SigExtra_T_13}; // @[primitives.scala:77:20]
wire [3:0] _CDom_reduced4SigExtra_T_15 = {_CDom_reduced4SigExtra_T_10, _CDom_reduced4SigExtra_T_14}; // @[primitives.scala:77:20]
wire [1:0] _CDom_reduced4SigExtra_T_16 = _CDom_reduced4SigExtra_T_5[5:4]; // @[primitives.scala:77:20, :78:22]
wire _CDom_reduced4SigExtra_T_17 = _CDom_reduced4SigExtra_T_16[0]; // @[primitives.scala:77:20]
wire _CDom_reduced4SigExtra_T_18 = _CDom_reduced4SigExtra_T_16[1]; // @[primitives.scala:77:20]
wire [1:0] _CDom_reduced4SigExtra_T_19 = {_CDom_reduced4SigExtra_T_17, _CDom_reduced4SigExtra_T_18}; // @[primitives.scala:77:20]
wire [5:0] _CDom_reduced4SigExtra_T_20 = {_CDom_reduced4SigExtra_T_15, _CDom_reduced4SigExtra_T_19}; // @[primitives.scala:77:20]
wire [6:0] _CDom_reduced4SigExtra_T_21 = {1'h0, _CDom_reduced4SigExtra_T_2[5:0] & _CDom_reduced4SigExtra_T_20}; // @[primitives.scala:77:20, :124:20]
wire CDom_reduced4SigExtra = |_CDom_reduced4SigExtra_T_21; // @[MulAddRecFN.scala:222:72, :223:73]
wire [25:0] _CDom_sig_T = CDom_mainSig[28:3]; // @[MulAddRecFN.scala:219:56, :225:25]
wire [2:0] _CDom_sig_T_1 = CDom_mainSig[2:0]; // @[MulAddRecFN.scala:219:56, :226:25]
wire _CDom_sig_T_2 = |_CDom_sig_T_1; // @[MulAddRecFN.scala:226:{25,32}]
wire _CDom_sig_T_3 = _CDom_sig_T_2 | CDom_reduced4SigExtra; // @[MulAddRecFN.scala:223:73, :226:{32,36}]
wire _CDom_sig_T_4 = _CDom_sig_T_3 | CDom_absSigSumExtra; // @[MulAddRecFN.scala:214:12, :226:{36,61}]
wire [26:0] CDom_sig = {_CDom_sig_T, _CDom_sig_T_4}; // @[MulAddRecFN.scala:225:{12,25}, :226:61]
wire notCDom_signSigSum = sigSum[51]; // @[MulAddRecFN.scala:192:12, :232:36]
wire [50:0] _notCDom_absSigSum_T = sigSum[50:0]; // @[MulAddRecFN.scala:192:12, :235:20]
wire [50:0] _notCDom_absSigSum_T_2 = sigSum[50:0]; // @[MulAddRecFN.scala:192:12, :235:20, :236:19]
wire [50:0] _notCDom_absSigSum_T_1 = ~_notCDom_absSigSum_T; // @[MulAddRecFN.scala:235:{13,20}]
wire [51:0] _notCDom_absSigSum_T_3 = {1'h0, _notCDom_absSigSum_T_2} + {51'h0, io_fromPreMul_doSubMags_0}; // @[MulAddRecFN.scala:169:7, :236:{19,41}]
wire [50:0] _notCDom_absSigSum_T_4 = _notCDom_absSigSum_T_3[50:0]; // @[MulAddRecFN.scala:236:41]
wire [50:0] notCDom_absSigSum = notCDom_signSigSum ? _notCDom_absSigSum_T_1 : _notCDom_absSigSum_T_4; // @[MulAddRecFN.scala:232:36, :234:12, :235:13, :236:41]
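// The wires below flatten orReduceBy2 over the 51-bit notCDom_absSigSum: 26 OR-reduced
// pairs (the last covers the single leftover bit 50), used both for the leading-zero
// count and for the near-path sticky bit.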
wire _notCDom_reduced2AbsSigSum_reducedVec_0_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_1_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_2_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_3_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_4_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_5_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_6_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_7_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_8_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_9_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_10_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_11_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_12_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_13_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_14_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_15_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_16_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_17_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_18_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_19_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_20_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_21_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_22_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_23_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_24_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_25_T_1; // @[primitives.scala:106:57]
wire notCDom_reduced2AbsSigSum_reducedVec_0; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_1; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_2; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_3; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_4; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_5; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_6; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_7; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_8; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_9; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_10; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_11; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_12; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_13; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_14; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_15; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_16; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_17; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_18; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_19; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_20; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_21; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_22; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_23; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_24; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_25; // @[primitives.scala:101:30]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_0_T = notCDom_absSigSum[1:0]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_0_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_0_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_0 = _notCDom_reduced2AbsSigSum_reducedVec_0_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_1_T = notCDom_absSigSum[3:2]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_1_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_1_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_1 = _notCDom_reduced2AbsSigSum_reducedVec_1_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_2_T = notCDom_absSigSum[5:4]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_2_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_2_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_2 = _notCDom_reduced2AbsSigSum_reducedVec_2_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_3_T = notCDom_absSigSum[7:6]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_3_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_3_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_3 = _notCDom_reduced2AbsSigSum_reducedVec_3_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_4_T = notCDom_absSigSum[9:8]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_4_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_4_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_4 = _notCDom_reduced2AbsSigSum_reducedVec_4_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_5_T = notCDom_absSigSum[11:10]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_5_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_5_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_5 = _notCDom_reduced2AbsSigSum_reducedVec_5_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_6_T = notCDom_absSigSum[13:12]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_6_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_6_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_6 = _notCDom_reduced2AbsSigSum_reducedVec_6_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_7_T = notCDom_absSigSum[15:14]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_7_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_7_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_7 = _notCDom_reduced2AbsSigSum_reducedVec_7_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_8_T = notCDom_absSigSum[17:16]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_8_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_8_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_8 = _notCDom_reduced2AbsSigSum_reducedVec_8_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_9_T = notCDom_absSigSum[19:18]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_9_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_9_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_9 = _notCDom_reduced2AbsSigSum_reducedVec_9_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_10_T = notCDom_absSigSum[21:20]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_10_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_10_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_10 = _notCDom_reduced2AbsSigSum_reducedVec_10_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_11_T = notCDom_absSigSum[23:22]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_11_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_11_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_11 = _notCDom_reduced2AbsSigSum_reducedVec_11_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_12_T = notCDom_absSigSum[25:24]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_12_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_12_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_12 = _notCDom_reduced2AbsSigSum_reducedVec_12_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_13_T = notCDom_absSigSum[27:26]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_13_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_13_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_13 = _notCDom_reduced2AbsSigSum_reducedVec_13_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_14_T = notCDom_absSigSum[29:28]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_14_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_14_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_14 = _notCDom_reduced2AbsSigSum_reducedVec_14_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_15_T = notCDom_absSigSum[31:30]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_15_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_15_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_15 = _notCDom_reduced2AbsSigSum_reducedVec_15_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_16_T = notCDom_absSigSum[33:32]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_16_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_16_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_16 = _notCDom_reduced2AbsSigSum_reducedVec_16_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_17_T = notCDom_absSigSum[35:34]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_17_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_17_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_17 = _notCDom_reduced2AbsSigSum_reducedVec_17_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_18_T = notCDom_absSigSum[37:36]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_18_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_18_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_18 = _notCDom_reduced2AbsSigSum_reducedVec_18_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_19_T = notCDom_absSigSum[39:38]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_19_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_19_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_19 = _notCDom_reduced2AbsSigSum_reducedVec_19_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_20_T = notCDom_absSigSum[41:40]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_20_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_20_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_20 = _notCDom_reduced2AbsSigSum_reducedVec_20_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_21_T = notCDom_absSigSum[43:42]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_21_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_21_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_21 = _notCDom_reduced2AbsSigSum_reducedVec_21_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_22_T = notCDom_absSigSum[45:44]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_22_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_22_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_22 = _notCDom_reduced2AbsSigSum_reducedVec_22_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_23_T = notCDom_absSigSum[47:46]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_23_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_23_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_23 = _notCDom_reduced2AbsSigSum_reducedVec_23_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_24_T = notCDom_absSigSum[49:48]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_24_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_24_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_24 = _notCDom_reduced2AbsSigSum_reducedVec_24_T_1; // @[primitives.scala:101:30, :103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_25_T = notCDom_absSigSum[50]; // @[primitives.scala:106:15]
assign _notCDom_reduced2AbsSigSum_reducedVec_25_T_1 = _notCDom_reduced2AbsSigSum_reducedVec_25_T; // @[primitives.scala:106:{15,57}]
assign notCDom_reduced2AbsSigSum_reducedVec_25 = _notCDom_reduced2AbsSigSum_reducedVec_25_T_1; // @[primitives.scala:101:30, :106:57]
wire [1:0] notCDom_reduced2AbsSigSum_lo_lo_lo_hi = {notCDom_reduced2AbsSigSum_reducedVec_2, notCDom_reduced2AbsSigSum_reducedVec_1}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_lo_lo_lo = {notCDom_reduced2AbsSigSum_lo_lo_lo_hi, notCDom_reduced2AbsSigSum_reducedVec_0}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_lo_lo_hi_hi = {notCDom_reduced2AbsSigSum_reducedVec_5, notCDom_reduced2AbsSigSum_reducedVec_4}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_lo_lo_hi = {notCDom_reduced2AbsSigSum_lo_lo_hi_hi, notCDom_reduced2AbsSigSum_reducedVec_3}; // @[primitives.scala:101:30, :107:20]
wire [5:0] notCDom_reduced2AbsSigSum_lo_lo = {notCDom_reduced2AbsSigSum_lo_lo_hi, notCDom_reduced2AbsSigSum_lo_lo_lo}; // @[primitives.scala:107:20]
wire [1:0] notCDom_reduced2AbsSigSum_lo_hi_lo_hi = {notCDom_reduced2AbsSigSum_reducedVec_8, notCDom_reduced2AbsSigSum_reducedVec_7}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_lo_hi_lo = {notCDom_reduced2AbsSigSum_lo_hi_lo_hi, notCDom_reduced2AbsSigSum_reducedVec_6}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_lo_hi_hi_lo = {notCDom_reduced2AbsSigSum_reducedVec_10, notCDom_reduced2AbsSigSum_reducedVec_9}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_lo_hi_hi_hi = {notCDom_reduced2AbsSigSum_reducedVec_12, notCDom_reduced2AbsSigSum_reducedVec_11}; // @[primitives.scala:101:30, :107:20]
wire [3:0] notCDom_reduced2AbsSigSum_lo_hi_hi = {notCDom_reduced2AbsSigSum_lo_hi_hi_hi, notCDom_reduced2AbsSigSum_lo_hi_hi_lo}; // @[primitives.scala:107:20]
wire [6:0] notCDom_reduced2AbsSigSum_lo_hi = {notCDom_reduced2AbsSigSum_lo_hi_hi, notCDom_reduced2AbsSigSum_lo_hi_lo}; // @[primitives.scala:107:20]
wire [12:0] notCDom_reduced2AbsSigSum_lo = {notCDom_reduced2AbsSigSum_lo_hi, notCDom_reduced2AbsSigSum_lo_lo}; // @[primitives.scala:107:20]
wire [1:0] notCDom_reduced2AbsSigSum_hi_lo_lo_hi = {notCDom_reduced2AbsSigSum_reducedVec_15, notCDom_reduced2AbsSigSum_reducedVec_14}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_hi_lo_lo = {notCDom_reduced2AbsSigSum_hi_lo_lo_hi, notCDom_reduced2AbsSigSum_reducedVec_13}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_hi_lo_hi_hi = {notCDom_reduced2AbsSigSum_reducedVec_18, notCDom_reduced2AbsSigSum_reducedVec_17}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_hi_lo_hi = {notCDom_reduced2AbsSigSum_hi_lo_hi_hi, notCDom_reduced2AbsSigSum_reducedVec_16}; // @[primitives.scala:101:30, :107:20]
wire [5:0] notCDom_reduced2AbsSigSum_hi_lo = {notCDom_reduced2AbsSigSum_hi_lo_hi, notCDom_reduced2AbsSigSum_hi_lo_lo}; // @[primitives.scala:107:20]
wire [1:0] notCDom_reduced2AbsSigSum_hi_hi_lo_hi = {notCDom_reduced2AbsSigSum_reducedVec_21, notCDom_reduced2AbsSigSum_reducedVec_20}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_hi_hi_lo = {notCDom_reduced2AbsSigSum_hi_hi_lo_hi, notCDom_reduced2AbsSigSum_reducedVec_19}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_hi_hi_hi_lo = {notCDom_reduced2AbsSigSum_reducedVec_23, notCDom_reduced2AbsSigSum_reducedVec_22}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_hi_hi_hi_hi = {notCDom_reduced2AbsSigSum_reducedVec_25, notCDom_reduced2AbsSigSum_reducedVec_24}; // @[primitives.scala:101:30, :107:20]
wire [3:0] notCDom_reduced2AbsSigSum_hi_hi_hi = {notCDom_reduced2AbsSigSum_hi_hi_hi_hi, notCDom_reduced2AbsSigSum_hi_hi_hi_lo}; // @[primitives.scala:107:20]
wire [6:0] notCDom_reduced2AbsSigSum_hi_hi = {notCDom_reduced2AbsSigSum_hi_hi_hi, notCDom_reduced2AbsSigSum_hi_hi_lo}; // @[primitives.scala:107:20]
wire [12:0] notCDom_reduced2AbsSigSum_hi = {notCDom_reduced2AbsSigSum_hi_hi, notCDom_reduced2AbsSigSum_hi_lo}; // @[primitives.scala:107:20]
wire [25:0] notCDom_reduced2AbsSigSum = {notCDom_reduced2AbsSigSum_hi, notCDom_reduced2AbsSigSum_lo}; // @[primitives.scala:107:20]
wire _notCDom_normDistReduced2_T = notCDom_reduced2AbsSigSum[0]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_1 = notCDom_reduced2AbsSigSum[1]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_2 = notCDom_reduced2AbsSigSum[2]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_3 = notCDom_reduced2AbsSigSum[3]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_4 = notCDom_reduced2AbsSigSum[4]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_5 = notCDom_reduced2AbsSigSum[5]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_6 = notCDom_reduced2AbsSigSum[6]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_7 = notCDom_reduced2AbsSigSum[7]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_8 = notCDom_reduced2AbsSigSum[8]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_9 = notCDom_reduced2AbsSigSum[9]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_10 = notCDom_reduced2AbsSigSum[10]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_11 = notCDom_reduced2AbsSigSum[11]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_12 = notCDom_reduced2AbsSigSum[12]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_13 = notCDom_reduced2AbsSigSum[13]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_14 = notCDom_reduced2AbsSigSum[14]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_15 = notCDom_reduced2AbsSigSum[15]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_16 = notCDom_reduced2AbsSigSum[16]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_17 = notCDom_reduced2AbsSigSum[17]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_18 = notCDom_reduced2AbsSigSum[18]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_19 = notCDom_reduced2AbsSigSum[19]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_20 = notCDom_reduced2AbsSigSum[20]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_21 = notCDom_reduced2AbsSigSum[21]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_22 = notCDom_reduced2AbsSigSum[22]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_23 = notCDom_reduced2AbsSigSum[23]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_24 = notCDom_reduced2AbsSigSum[24]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_25 = notCDom_reduced2AbsSigSum[25]; // @[primitives.scala:91:52, :107:20]
wire [4:0] _notCDom_normDistReduced2_T_26 = {4'hC, ~_notCDom_normDistReduced2_T_1}; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_27 = _notCDom_normDistReduced2_T_2 ? 5'h17 : _notCDom_normDistReduced2_T_26; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_28 = _notCDom_normDistReduced2_T_3 ? 5'h16 : _notCDom_normDistReduced2_T_27; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_29 = _notCDom_normDistReduced2_T_4 ? 5'h15 : _notCDom_normDistReduced2_T_28; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_30 = _notCDom_normDistReduced2_T_5 ? 5'h14 : _notCDom_normDistReduced2_T_29; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_31 = _notCDom_normDistReduced2_T_6 ? 5'h13 : _notCDom_normDistReduced2_T_30; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_32 = _notCDom_normDistReduced2_T_7 ? 5'h12 : _notCDom_normDistReduced2_T_31; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_33 = _notCDom_normDistReduced2_T_8 ? 5'h11 : _notCDom_normDistReduced2_T_32; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_34 = _notCDom_normDistReduced2_T_9 ? 5'h10 : _notCDom_normDistReduced2_T_33; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_35 = _notCDom_normDistReduced2_T_10 ? 5'hF : _notCDom_normDistReduced2_T_34; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_36 = _notCDom_normDistReduced2_T_11 ? 5'hE : _notCDom_normDistReduced2_T_35; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_37 = _notCDom_normDistReduced2_T_12 ? 5'hD : _notCDom_normDistReduced2_T_36; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_38 = _notCDom_normDistReduced2_T_13 ? 5'hC : _notCDom_normDistReduced2_T_37; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_39 = _notCDom_normDistReduced2_T_14 ? 5'hB : _notCDom_normDistReduced2_T_38; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_40 = _notCDom_normDistReduced2_T_15 ? 5'hA : _notCDom_normDistReduced2_T_39; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_41 = _notCDom_normDistReduced2_T_16 ? 5'h9 : _notCDom_normDistReduced2_T_40; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_42 = _notCDom_normDistReduced2_T_17 ? 5'h8 : _notCDom_normDistReduced2_T_41; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_43 = _notCDom_normDistReduced2_T_18 ? 5'h7 : _notCDom_normDistReduced2_T_42; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_44 = _notCDom_normDistReduced2_T_19 ? 5'h6 : _notCDom_normDistReduced2_T_43; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_45 = _notCDom_normDistReduced2_T_20 ? 5'h5 : _notCDom_normDistReduced2_T_44; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_46 = _notCDom_normDistReduced2_T_21 ? 5'h4 : _notCDom_normDistReduced2_T_45; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_47 = _notCDom_normDistReduced2_T_22 ? 5'h3 : _notCDom_normDistReduced2_T_46; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_48 = _notCDom_normDistReduced2_T_23 ? 5'h2 : _notCDom_normDistReduced2_T_47; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_49 = _notCDom_normDistReduced2_T_24 ? 5'h1 : _notCDom_normDistReduced2_T_48; // @[Mux.scala:50:70]
wire [4:0] notCDom_normDistReduced2 = _notCDom_normDistReduced2_T_25 ? 5'h0 : _notCDom_normDistReduced2_T_49; // @[Mux.scala:50:70]
wire [5:0] notCDom_nearNormDist = {notCDom_normDistReduced2, 1'h0}; // @[Mux.scala:50:70]
wire [6:0] _notCDom_sExp_T = {1'h0, notCDom_nearNormDist}; // @[MulAddRecFN.scala:240:56, :241:76]
wire [10:0] _notCDom_sExp_T_1 = _GEN - {{4{_notCDom_sExp_T[6]}}, _notCDom_sExp_T}; // @[MulAddRecFN.scala:203:43, :241:{46,76}]
wire [9:0] _notCDom_sExp_T_2 = _notCDom_sExp_T_1[9:0]; // @[MulAddRecFN.scala:241:46]
wire [9:0] notCDom_sExp = _notCDom_sExp_T_2; // @[MulAddRecFN.scala:241:46]
wire [113:0] _notCDom_mainSig_T = {63'h0, notCDom_absSigSum} << notCDom_nearNormDist; // @[MulAddRecFN.scala:234:12, :240:56, :243:27]
wire [28:0] notCDom_mainSig = _notCDom_mainSig_T[51:23]; // @[MulAddRecFN.scala:243:{27,50}]
wire [12:0] _notCDom_reduced4SigExtra_T = notCDom_reduced2AbsSigSum[12:0]; // @[primitives.scala:107:20]
wire [12:0] _notCDom_reduced4SigExtra_T_1 = _notCDom_reduced4SigExtra_T; // @[MulAddRecFN.scala:247:{39,55}]
wire _notCDom_reduced4SigExtra_reducedVec_0_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_1_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_2_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_3_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_4_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_5_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_6_T_1; // @[primitives.scala:106:57]
wire notCDom_reduced4SigExtra_reducedVec_0; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_1; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_2; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_3; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_4; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_5; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_6; // @[primitives.scala:101:30]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_0_T = _notCDom_reduced4SigExtra_T_1[1:0]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_0_T_1 = |_notCDom_reduced4SigExtra_reducedVec_0_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_0 = _notCDom_reduced4SigExtra_reducedVec_0_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_1_T = _notCDom_reduced4SigExtra_T_1[3:2]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_1_T_1 = |_notCDom_reduced4SigExtra_reducedVec_1_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_1 = _notCDom_reduced4SigExtra_reducedVec_1_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_2_T = _notCDom_reduced4SigExtra_T_1[5:4]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_2_T_1 = |_notCDom_reduced4SigExtra_reducedVec_2_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_2 = _notCDom_reduced4SigExtra_reducedVec_2_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_3_T = _notCDom_reduced4SigExtra_T_1[7:6]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_3_T_1 = |_notCDom_reduced4SigExtra_reducedVec_3_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_3 = _notCDom_reduced4SigExtra_reducedVec_3_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_4_T = _notCDom_reduced4SigExtra_T_1[9:8]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_4_T_1 = |_notCDom_reduced4SigExtra_reducedVec_4_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_4 = _notCDom_reduced4SigExtra_reducedVec_4_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_5_T = _notCDom_reduced4SigExtra_T_1[11:10]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_5_T_1 = |_notCDom_reduced4SigExtra_reducedVec_5_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_5 = _notCDom_reduced4SigExtra_reducedVec_5_T_1; // @[primitives.scala:101:30, :103:54]
wire _notCDom_reduced4SigExtra_reducedVec_6_T = _notCDom_reduced4SigExtra_T_1[12]; // @[primitives.scala:106:15]
assign _notCDom_reduced4SigExtra_reducedVec_6_T_1 = _notCDom_reduced4SigExtra_reducedVec_6_T; // @[primitives.scala:106:{15,57}]
assign notCDom_reduced4SigExtra_reducedVec_6 = _notCDom_reduced4SigExtra_reducedVec_6_T_1; // @[primitives.scala:101:30, :106:57]
wire [1:0] notCDom_reduced4SigExtra_lo_hi = {notCDom_reduced4SigExtra_reducedVec_2, notCDom_reduced4SigExtra_reducedVec_1}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced4SigExtra_lo = {notCDom_reduced4SigExtra_lo_hi, notCDom_reduced4SigExtra_reducedVec_0}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced4SigExtra_hi_lo = {notCDom_reduced4SigExtra_reducedVec_4, notCDom_reduced4SigExtra_reducedVec_3}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced4SigExtra_hi_hi = {notCDom_reduced4SigExtra_reducedVec_6, notCDom_reduced4SigExtra_reducedVec_5}; // @[primitives.scala:101:30, :107:20]
wire [3:0] notCDom_reduced4SigExtra_hi = {notCDom_reduced4SigExtra_hi_hi, notCDom_reduced4SigExtra_hi_lo}; // @[primitives.scala:107:20]
wire [6:0] _notCDom_reduced4SigExtra_T_2 = {notCDom_reduced4SigExtra_hi, notCDom_reduced4SigExtra_lo}; // @[primitives.scala:107:20]
wire [3:0] _notCDom_reduced4SigExtra_T_3 = notCDom_normDistReduced2[4:1]; // @[Mux.scala:50:70]
wire [3:0] _notCDom_reduced4SigExtra_T_4 = ~_notCDom_reduced4SigExtra_T_3; // @[primitives.scala:52:21]
wire [16:0] notCDom_reduced4SigExtra_shift = $signed(17'sh10000 >>> _notCDom_reduced4SigExtra_T_4); // @[primitives.scala:52:21, :76:56]
wire [5:0] _notCDom_reduced4SigExtra_T_5 = notCDom_reduced4SigExtra_shift[6:1]; // @[primitives.scala:76:56, :78:22]
wire [3:0] _notCDom_reduced4SigExtra_T_6 = _notCDom_reduced4SigExtra_T_5[3:0]; // @[primitives.scala:77:20, :78:22]
wire [1:0] _notCDom_reduced4SigExtra_T_7 = _notCDom_reduced4SigExtra_T_6[1:0]; // @[primitives.scala:77:20]
wire _notCDom_reduced4SigExtra_T_8 = _notCDom_reduced4SigExtra_T_7[0]; // @[primitives.scala:77:20]
wire _notCDom_reduced4SigExtra_T_9 = _notCDom_reduced4SigExtra_T_7[1]; // @[primitives.scala:77:20]
wire [1:0] _notCDom_reduced4SigExtra_T_10 = {_notCDom_reduced4SigExtra_T_8, _notCDom_reduced4SigExtra_T_9}; // @[primitives.scala:77:20]
wire [1:0] _notCDom_reduced4SigExtra_T_11 = _notCDom_reduced4SigExtra_T_6[3:2]; // @[primitives.scala:77:20]
wire _notCDom_reduced4SigExtra_T_12 = _notCDom_reduced4SigExtra_T_11[0]; // @[primitives.scala:77:20]
wire _notCDom_reduced4SigExtra_T_13 = _notCDom_reduced4SigExtra_T_11[1]; // @[primitives.scala:77:20]
wire [1:0] _notCDom_reduced4SigExtra_T_14 = {_notCDom_reduced4SigExtra_T_12, _notCDom_reduced4SigExtra_T_13}; // @[primitives.scala:77:20]
wire [3:0] _notCDom_reduced4SigExtra_T_15 = {_notCDom_reduced4SigExtra_T_10, _notCDom_reduced4SigExtra_T_14}; // @[primitives.scala:77:20]
wire [1:0] _notCDom_reduced4SigExtra_T_16 = _notCDom_reduced4SigExtra_T_5[5:4]; // @[primitives.scala:77:20, :78:22]
wire _notCDom_reduced4SigExtra_T_17 = _notCDom_reduced4SigExtra_T_16[0]; // @[primitives.scala:77:20]
wire _notCDom_reduced4SigExtra_T_18 = _notCDom_reduced4SigExtra_T_16[1]; // @[primitives.scala:77:20]
wire [1:0] _notCDom_reduced4SigExtra_T_19 = {_notCDom_reduced4SigExtra_T_17, _notCDom_reduced4SigExtra_T_18}; // @[primitives.scala:77:20]
wire [5:0] _notCDom_reduced4SigExtra_T_20 = {_notCDom_reduced4SigExtra_T_15, _notCDom_reduced4SigExtra_T_19}; // @[primitives.scala:77:20]
wire [6:0] _notCDom_reduced4SigExtra_T_21 = {1'h0, _notCDom_reduced4SigExtra_T_2[5:0] & _notCDom_reduced4SigExtra_T_20}; // @[primitives.scala:77:20, :107:20]
wire notCDom_reduced4SigExtra = |_notCDom_reduced4SigExtra_T_21; // @[MulAddRecFN.scala:247:78, :249:11]
wire [25:0] _notCDom_sig_T = notCDom_mainSig[28:3]; // @[MulAddRecFN.scala:243:50, :251:28]
wire [2:0] _notCDom_sig_T_1 = notCDom_mainSig[2:0]; // @[MulAddRecFN.scala:243:50, :252:28]
wire _notCDom_sig_T_2 = |_notCDom_sig_T_1; // @[MulAddRecFN.scala:252:{28,35}]
wire _notCDom_sig_T_3 = _notCDom_sig_T_2 | notCDom_reduced4SigExtra; // @[MulAddRecFN.scala:249:11, :252:{35,39}]
wire [26:0] notCDom_sig = {_notCDom_sig_T, _notCDom_sig_T_3}; // @[MulAddRecFN.scala:251:{12,28}, :252:39]
wire [1:0] _notCDom_completeCancellation_T = notCDom_sig[26:25]; // @[MulAddRecFN.scala:251:12, :255:21]
wire notCDom_completeCancellation = _notCDom_completeCancellation_T == 2'h0; // @[primitives.scala:103:54]
wire _notCDom_sign_T = io_fromPreMul_signProd_0 ^ notCDom_signSigSum; // @[MulAddRecFN.scala:169:7, :232:36, :259:36]
wire notCDom_sign = ~notCDom_completeCancellation & _notCDom_sign_T; // @[MulAddRecFN.scala:255:50, :257:12, :259:36]
wire _GEN_0 = io_fromPreMul_isInfA_0 | io_fromPreMul_isInfB_0; // @[MulAddRecFN.scala:169:7, :264:49]
wire notNaN_isInfProd; // @[MulAddRecFN.scala:264:49]
assign notNaN_isInfProd = _GEN_0; // @[MulAddRecFN.scala:264:49]
wire _io_invalidExc_T_5; // @[MulAddRecFN.scala:275:36]
assign _io_invalidExc_T_5 = _GEN_0; // @[MulAddRecFN.scala:264:49, :275:36]
assign notNaN_isInfOut = notNaN_isInfProd | io_fromPreMul_isInfC_0; // @[MulAddRecFN.scala:169:7, :264:49, :265:44]
assign io_rawOut_isInf_0 = notNaN_isInfOut; // @[MulAddRecFN.scala:169:7, :265:44]
wire _notNaN_addZeros_T = io_fromPreMul_isZeroA_0 | io_fromPreMul_isZeroB_0; // @[MulAddRecFN.scala:169:7, :267:32]
wire notNaN_addZeros = _notNaN_addZeros_T & io_fromPreMul_isZeroC_0; // @[MulAddRecFN.scala:169:7, :267:{32,58}]
wire _io_rawOut_sign_T_4 = notNaN_addZeros; // @[MulAddRecFN.scala:267:58, :287:26]
wire _io_invalidExc_T = io_fromPreMul_isInfA_0 & io_fromPreMul_isZeroB_0; // @[MulAddRecFN.scala:169:7, :272:31]
wire _io_invalidExc_T_1 = io_fromPreMul_isSigNaNAny_0 | _io_invalidExc_T; // @[MulAddRecFN.scala:169:7, :271:35, :272:31]
wire _io_invalidExc_T_2 = io_fromPreMul_isZeroA_0 & io_fromPreMul_isInfB_0; // @[MulAddRecFN.scala:169:7, :273:32]
wire _io_invalidExc_T_3 = _io_invalidExc_T_1 | _io_invalidExc_T_2; // @[MulAddRecFN.scala:271:35, :272:57, :273:32]
wire _io_invalidExc_T_4 = ~io_fromPreMul_isNaNAOrB_0; // @[MulAddRecFN.scala:169:7, :274:10]
wire _io_invalidExc_T_6 = _io_invalidExc_T_4 & _io_invalidExc_T_5; // @[MulAddRecFN.scala:274:{10,36}, :275:36]
wire _io_invalidExc_T_7 = _io_invalidExc_T_6 & io_fromPreMul_isInfC_0; // @[MulAddRecFN.scala:169:7, :274:36, :275:61]
wire _io_invalidExc_T_8 = _io_invalidExc_T_7 & io_fromPreMul_doSubMags_0; // @[MulAddRecFN.scala:169:7, :275:61, :276:35]
assign _io_invalidExc_T_9 = _io_invalidExc_T_3 | _io_invalidExc_T_8; // @[MulAddRecFN.scala:272:57, :273:57, :276:35]
assign io_invalidExc_0 = _io_invalidExc_T_9; // @[MulAddRecFN.scala:169:7, :273:57]
assign _io_rawOut_isNaN_T = io_fromPreMul_isNaNAOrB_0 | io_fromPreMul_isNaNC_0; // @[MulAddRecFN.scala:169:7, :278:48]
assign io_rawOut_isNaN_0 = _io_rawOut_isNaN_T; // @[MulAddRecFN.scala:169:7, :278:48]
wire _io_rawOut_isZero_T = ~io_fromPreMul_CIsDominant_0; // @[MulAddRecFN.scala:169:7, :283:14]
wire _io_rawOut_isZero_T_1 = _io_rawOut_isZero_T & notCDom_completeCancellation; // @[MulAddRecFN.scala:255:50, :283:{14,42}]
assign _io_rawOut_isZero_T_2 = notNaN_addZeros | _io_rawOut_isZero_T_1; // @[MulAddRecFN.scala:267:58, :282:25, :283:42]
assign io_rawOut_isZero_0 = _io_rawOut_isZero_T_2; // @[MulAddRecFN.scala:169:7, :282:25]
wire _io_rawOut_sign_T = notNaN_isInfProd & io_fromPreMul_signProd_0; // @[MulAddRecFN.scala:169:7, :264:49, :285:27]
wire _io_rawOut_sign_T_1 = io_fromPreMul_isInfC_0 & opSignC; // @[MulAddRecFN.scala:169:7, :190:42, :286:31]
wire _io_rawOut_sign_T_2 = _io_rawOut_sign_T | _io_rawOut_sign_T_1; // @[MulAddRecFN.scala:285:{27,54}, :286:31]
wire _io_rawOut_sign_T_5 = _io_rawOut_sign_T_4 & io_fromPreMul_signProd_0; // @[MulAddRecFN.scala:169:7, :287:{26,48}]
wire _io_rawOut_sign_T_6 = _io_rawOut_sign_T_5 & opSignC; // @[MulAddRecFN.scala:190:42, :287:48, :288:36]
wire _io_rawOut_sign_T_7 = _io_rawOut_sign_T_2 | _io_rawOut_sign_T_6; // @[MulAddRecFN.scala:285:54, :286:43, :288:36]
wire _io_rawOut_sign_T_11 = _io_rawOut_sign_T_7; // @[MulAddRecFN.scala:286:43, :288:48]
wire _io_rawOut_sign_T_9 = io_fromPreMul_signProd_0 | opSignC; // @[MulAddRecFN.scala:169:7, :190:42, :290:37]
wire _io_rawOut_sign_T_12 = ~notNaN_isInfOut; // @[MulAddRecFN.scala:265:44, :291:10]
wire _io_rawOut_sign_T_13 = ~notNaN_addZeros; // @[MulAddRecFN.scala:267:58, :291:31]
wire _io_rawOut_sign_T_14 = _io_rawOut_sign_T_12 & _io_rawOut_sign_T_13; // @[MulAddRecFN.scala:291:{10,28,31}]
wire _io_rawOut_sign_T_15 = io_fromPreMul_CIsDominant_0 ? opSignC : notCDom_sign; // @[MulAddRecFN.scala:169:7, :190:42, :257:12, :292:17]
wire _io_rawOut_sign_T_16 = _io_rawOut_sign_T_14 & _io_rawOut_sign_T_15; // @[MulAddRecFN.scala:291:{28,49}, :292:17]
assign _io_rawOut_sign_T_17 = _io_rawOut_sign_T_11 | _io_rawOut_sign_T_16; // @[MulAddRecFN.scala:288:48, :290:50, :291:49]
assign io_rawOut_sign_0 = _io_rawOut_sign_T_17; // @[MulAddRecFN.scala:169:7, :290:50]
assign _io_rawOut_sExp_T = io_fromPreMul_CIsDominant_0 ? CDom_sExp : notCDom_sExp; // @[MulAddRecFN.scala:169:7, :203:43, :241:46, :293:26]
assign io_rawOut_sExp_0 = _io_rawOut_sExp_T; // @[MulAddRecFN.scala:169:7, :293:26]
assign _io_rawOut_sig_T = io_fromPreMul_CIsDominant_0 ? CDom_sig : notCDom_sig; // @[MulAddRecFN.scala:169:7, :225:12, :251:12, :294:25]
assign io_rawOut_sig_0 = _io_rawOut_sig_T; // @[MulAddRecFN.scala:169:7, :294:25]
assign io_invalidExc = io_invalidExc_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_isNaN = io_rawOut_isNaN_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_isInf = io_rawOut_isInf_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_isZero = io_rawOut_isZero_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_sign = io_rawOut_sign_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_sExp = io_rawOut_sExp_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_sig = io_rawOut_sig_0; // @[MulAddRecFN.scala:169:7]
endmodule |
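
The notCDom normalization logic expanded above follows a compact pattern: OR-reduce the absolute significand sum two bits at a time, priority-encode the reduced vector to count leading zeros, and double that count to obtain the left-shift distance, folding the discarded low bits into a sticky bit. The following is a minimal, hypothetical Chisel sketch of that pattern, assuming hardfloat-style helpers; the module name, port names, and the 51-bit width are illustrative only and are not taken from the generator source.

import chisel3._
import chisel3.util._

object NormSketchHelpers {
  // OR together adjacent bit pairs; halves the width the priority encoder must scan.
  def orReduceBy2(in: UInt): UInt =
    VecInit((0 until (in.getWidth + 1) / 2).map { i =>
      in(math.min(2 * i + 1, in.getWidth - 1), 2 * i).orR
    }).asUInt

  // Leading-zero count, expressed as the same priority-mux chain seen in the Verilog above.
  def countLeadingZeros(in: UInt): UInt =
    PriorityEncoder(in.asBools.reverse)
}

class NotCDomNormSketch extends Module {
  import NormSketchHelpers._
  val io = IO(new Bundle {
    val absSigSum    = Input(UInt(51.W))   // illustrative width
    val nearNormDist = Output(UInt(6.W))
  })
  // Count leading zeros on the pair-reduced sum, then double the result to get the
  // (even) normalization distance used for the left shift of absSigSum.
  val reduced2 = orReduceBy2(io.absSigSum)
  io.nearNormDist := countLeadingZeros(reduced2) ## 0.U(1.W)
}

Counting on the pair-reduced vector halves the depth of the priority-mux chain; the distance is only resolved to a multiple of two, and the extra low-order bits kept in the main significand together with the sticky computation absorb the remaining slack.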
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
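  //Like monAssert, but with the property direction flipped: still an assert while monitoring,
  //but an assumption (environment constraint) when used for formal verification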
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
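  // True iff every client that could have issued `source` has `address` inside at least one of
  // its visibility ranges; used below to flag transactions that escape the configured bank visibility.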
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
    //The monitor doesn’t check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
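    // D-channel beats are produced by the far side of the link, so these checks use `assume`
    // (flipped property direction): under formal verification they become environment
    // constraints rather than proof obligations.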
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
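    // Latch the header fields on the first beat of a burst, then require every later beat to
    // present identical opcode/param/size/source/address. The same pattern is repeated below
    // for the B, C and D channels.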
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
      monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
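    // Strategy intended for formal verification: pick one arbitrary but stable ("symbolic")
    // source ID and track only its request/response handshake; since the solver may choose any
    // legal value for sym_source, proving the properties for it proves them for every source ID.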
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
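  // Per-source bookkeeping: besides the in-flight bitmap, record the opcode and size of each
  // outstanding 'A' request so the matching 'D' response can be checked against them.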
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
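    // inflight_opcodes / inflight_sizes are flat vectors with one lane per source ID, each lane
    // storing (value << 1) | 1 so that an all-zero lane unambiguously means "nothing in flight";
    // the *_lookup wires below shift the selected lane down to bit 0, mask it, and drop the
    // valid bit to recover the original opcode/size.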
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
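// Illustrative usage (assumed, not part of the original file): given
//   val helper = DecoupledHelper(in.valid, out.ready, !busy)
// helper.fire(out.ready) ANDs every term except out.ready (matched by reference),
// which is the usual way to drive out.valid, while helper.fire() gates the
// actual data transfer.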
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
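// Illustrative usage (assumed, not part of the original file):
//   PlusArg("verbose", docstring = "enable debug printfs")
// returns a 32-bit UInt holding the value of +verbose=N (or the default 0), and
//   PlusArg.timeout("max_cycles")(cycleCounter)
// triggers a simulation assertion once cycleCounter reaches the +max_cycles
// value, never firing if the plusarg is left at its default of 0.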
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
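// Illustrative example (not in the original source):
//   state.isOneOf(sIdle, sDone)
// elaborates to (state === sIdle) || (state === sDone).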
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
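// Illustrative examples (not in the original source), for a 4-bit x = "b0011".U:
//   x.rotateRight(1) = "b1001".U and x.rotateLeft(1) = "b0110".U
// The UInt-shift-amount variants build a log-depth mux tree from these fixed rotations.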
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
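// Illustrative example (not in the original source), with n = 5:
//   3.U.addWrap(4.U, 5) = 2.U   ((3 + 4) % 5)
//   1.U.subWrap(3.U, 5) = 3.U   ((1 - 3) mod 5)
// both valid only under the stated preconditions x < n and y < n.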
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
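// Illustrative examples (not in the original source):
//   UIntToOH1(2.U, 4) = "b0011".U (a mask of 2^x - 1 low bits),
//   OH1ToOH("b0011".U) = "b0100".U, and OH1ToUInt("b0011".U) = 2.U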
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
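// Illustrative example (not in the original source): for x = "b00100".U,
//   leftOR(x)  = "b11100".U (smear the set bit toward the MSB)
//   rightOR(x) = "b00111".U (smear the set bit toward the LSB)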
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
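// Illustrative example (not in the original source):
//   groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) returns Seq(1 -> List(1, 3), 0 -> List(2, 4)),
// with key order fixed by first appearance rather than by hash order.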
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A potentially empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
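// Illustrative example (not in the original source): IdRange(4, 8) covers source
// ids 4 to 7; since start = 4 and end-1 = 7 share the prefix above bit 1,
// contains(x: UInt) effectively reduces to the single comparison (x >> 2) === 1.U
// once the always-true bound checks are constant-propagated away.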
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
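// Illustrative example (not in the original source): AddressSet(0x1000, 0xf0f)
// (from the comment above) satisfies contains(0x1104) but not contains(0x1014),
// its alignment is 0x10, and widen(0xff) relaxes it to AddressSet(0x1000, 0xfff).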
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
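// Illustrative example (not in the original source): enumerateBits(0x14) returns
// Seq(0x4, 0x10) (the individual set bits), while enumerateMask(0x14) returns
// Seq(0x0, 0x4, 0x10, 0x14) (every value expressible under the mask).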
}
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
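// Illustrative example (not in the original source): for a 64-byte PutFullData on
// an 8-byte (beatBytes = 8) bus, numBeats1 gives beats1 = 7; `first` is true on
// beat 0, `last` on beat 7, `done` on the fire of beat 7, and `count` steps
// through 0, 1, ..., 7 as the counter walks down from 7.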
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
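// Sketch (illustrative only): acknowledging a Grant by echoing the D-channel sink onto
// the E channel; `tl` is a hypothetical client-side TLBundle, and a real client must
// also respect ready/valid flow control and multi-beat GrantData.
//   tl.e.valid := tl.d.valid && tl.d.bits.opcode === TLMessages.Grant
//   tl.e.bits  := edge.GrantAck(tl.d.bits)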
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
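// TLEdgeIn provides the mirror-image constructors for a manager (slave): requests it
// may issue on channel B and responses it may return on channel D.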
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
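// Sketch (illustrative only): a manager answering voluntary releases; `tl` is a
// hypothetical manager-side TLBundle, and a real design must also commit the
// written-back data and honour ready/valid backpressure.
//   val isRelease = tl.c.bits.opcode === TLMessages.Release ||
//                   tl.c.bits.opcode === TLMessages.ReleaseData
//   tl.d.valid := tl.c.valid && isRelease
//   tl.d.bits  := edge.ReleaseAck(tl.c.bits)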
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_42( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input io_in_a_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_b_ready, // @[Monitor.scala:20:14]
input io_in_b_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_b_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_b_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_b_bits_size, // @[Monitor.scala:20:14]
input io_in_b_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_b_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_b_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_b_bits_data, // @[Monitor.scala:20:14]
input io_in_b_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_c_ready, // @[Monitor.scala:20:14]
input io_in_c_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_c_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_c_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_c_bits_size, // @[Monitor.scala:20:14]
input io_in_c_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_c_bits_address, // @[Monitor.scala:20:14]
input [63:0] io_in_c_bits_data, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input io_in_d_bits_source, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_e_ready, // @[Monitor.scala:20:14]
input io_in_e_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_e_bits_sink // @[Monitor.scala:20:14]
);
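  // TLMonitor_42 is an assertion-only protocol checker: it observes all five TileLink
  // channels (A-E) of one edge, checks legal opcodes, params, sizes, address alignment,
  // and source IDs, and tracks outstanding transactions; it drives no outputs.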
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [3:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [31:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_b_ready_0 = io_in_b_ready; // @[Monitor.scala:36:7]
wire io_in_b_valid_0 = io_in_b_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_b_bits_opcode_0 = io_in_b_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_b_bits_param_0 = io_in_b_bits_param; // @[Monitor.scala:36:7]
wire [3:0] io_in_b_bits_size_0 = io_in_b_bits_size; // @[Monitor.scala:36:7]
wire io_in_b_bits_source_0 = io_in_b_bits_source; // @[Monitor.scala:36:7]
wire [31:0] io_in_b_bits_address_0 = io_in_b_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_b_bits_mask_0 = io_in_b_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_b_bits_data_0 = io_in_b_bits_data; // @[Monitor.scala:36:7]
wire io_in_b_bits_corrupt_0 = io_in_b_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_c_ready_0 = io_in_c_ready; // @[Monitor.scala:36:7]
wire io_in_c_valid_0 = io_in_c_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_c_bits_opcode_0 = io_in_c_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_c_bits_param_0 = io_in_c_bits_param; // @[Monitor.scala:36:7]
wire [3:0] io_in_c_bits_size_0 = io_in_c_bits_size; // @[Monitor.scala:36:7]
wire io_in_c_bits_source_0 = io_in_c_bits_source; // @[Monitor.scala:36:7]
wire [31:0] io_in_c_bits_address_0 = io_in_c_bits_address; // @[Monitor.scala:36:7]
wire [63:0] io_in_c_bits_data_0 = io_in_c_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
wire [3:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_e_ready_0 = io_in_e_ready; // @[Monitor.scala:36:7]
wire io_in_e_valid_0 = io_in_e_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_e_bits_sink_0 = io_in_e_bits_sink; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire io_in_c_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire _legal_source_T_2 = 1'h0; // @[Mux.scala:30:73]
wire [15:0] _a_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hFF; // @[Monitor.scala:612:57]
wire [15:0] _c_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hFF; // @[Monitor.scala:724:57]
wire [16:0] _a_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hFF; // @[Monitor.scala:612:57]
wire [16:0] _c_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hFF; // @[Monitor.scala:724:57]
wire [15:0] _a_size_lookup_T_3 = 16'h100; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h100; // @[Monitor.scala:612:51]
wire [15:0] _c_size_lookup_T_3 = 16'h100; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h100; // @[Monitor.scala:724:51]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [8:0] b_first_beats1 = 9'h0; // @[Edges.scala:221:14]
wire [8:0] b_first_count = 9'h0; // @[Edges.scala:234:25]
wire sink_ok = 1'h1; // @[Monitor.scala:309:31]
wire sink_ok_1 = 1'h1; // @[Monitor.scala:367:31]
wire _b_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire b_first_last = 1'h1; // @[Edges.scala:232:33]
wire [3:0] _a_size_lookup_T_2 = 4'h8; // @[Monitor.scala:641:117]
wire [3:0] _d_sizes_clr_T = 4'h8; // @[Monitor.scala:681:48]
wire [3:0] _c_size_lookup_T_2 = 4'h8; // @[Monitor.scala:750:119]
wire [3:0] _d_sizes_clr_T_6 = 4'h8; // @[Monitor.scala:791:48]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire _source_ok_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _mask_sizeOH_T_3 = io_in_b_bits_size_0; // @[Misc.scala:202:34]
wire _legal_source_T_1 = io_in_b_bits_source_0; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T = io_in_b_bits_address_0; // @[Monitor.scala:36:7]
wire _source_ok_T_5 = io_in_c_bits_source_0; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_70 = io_in_c_bits_address_0; // @[Monitor.scala:36:7]
wire _source_ok_T_3 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_T = ~io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire _source_ok_WIRE_1 = _source_ok_T_1; // @[Parameters.scala:1138:31]
wire source_ok = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire [26:0] _GEN = 27'hFFF << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [26:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [26:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [26:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [11:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [31:0] _is_aligned_T = {20'h0, io_in_a_bits_address_0[11:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 32'h0; // @[Edges.scala:21:{16,24}]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 4'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire _source_ok_T_2 = ~io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_2; // @[Parameters.scala:1138:31]
wire _source_ok_WIRE_1_1 = _source_ok_T_3; // @[Parameters.scala:1138:31]
wire source_ok_1 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire [32:0] _address_ok_T_1 = {1'h0, _address_ok_T}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_2 = _address_ok_T_1 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_3 = _address_ok_T_2; // @[Parameters.scala:137:46]
wire _address_ok_T_4 = _address_ok_T_3 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_0 = _address_ok_T_4; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_5 = {io_in_b_bits_address_0[31:13], io_in_b_bits_address_0[12:0] ^ 13'h1000}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_6 = {1'h0, _address_ok_T_5}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_7 = _address_ok_T_6 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_8 = _address_ok_T_7; // @[Parameters.scala:137:46]
wire _address_ok_T_9 = _address_ok_T_8 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1 = _address_ok_T_9; // @[Parameters.scala:612:40]
wire [13:0] _GEN_0 = io_in_b_bits_address_0[13:0] ^ 14'h3000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_10 = {io_in_b_bits_address_0[31:14], _GEN_0}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_11 = {1'h0, _address_ok_T_10}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_12 = _address_ok_T_11 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_13 = _address_ok_T_12; // @[Parameters.scala:137:46]
wire _address_ok_T_14 = _address_ok_T_13 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_2 = _address_ok_T_14; // @[Parameters.scala:612:40]
wire [16:0] _GEN_1 = io_in_b_bits_address_0[16:0] ^ 17'h10000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_15 = {io_in_b_bits_address_0[31:17], _GEN_1}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_16 = {1'h0, _address_ok_T_15}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_17 = _address_ok_T_16 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_18 = _address_ok_T_17; // @[Parameters.scala:137:46]
wire _address_ok_T_19 = _address_ok_T_18 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_3 = _address_ok_T_19; // @[Parameters.scala:612:40]
wire [20:0] _GEN_2 = io_in_b_bits_address_0[20:0] ^ 21'h100000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_20 = {io_in_b_bits_address_0[31:21], _GEN_2}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_21 = {1'h0, _address_ok_T_20}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_22 = _address_ok_T_21 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_23 = _address_ok_T_22; // @[Parameters.scala:137:46]
wire _address_ok_T_24 = _address_ok_T_23 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_4 = _address_ok_T_24; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_25 = {io_in_b_bits_address_0[31:21], io_in_b_bits_address_0[20:0] ^ 21'h110000}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_26 = {1'h0, _address_ok_T_25}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_27 = _address_ok_T_26 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_28 = _address_ok_T_27; // @[Parameters.scala:137:46]
wire _address_ok_T_29 = _address_ok_T_28 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_5 = _address_ok_T_29; // @[Parameters.scala:612:40]
wire [25:0] _GEN_3 = io_in_b_bits_address_0[25:0] ^ 26'h2000000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_30 = {io_in_b_bits_address_0[31:26], _GEN_3}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_31 = {1'h0, _address_ok_T_30}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_32 = _address_ok_T_31 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_33 = _address_ok_T_32; // @[Parameters.scala:137:46]
wire _address_ok_T_34 = _address_ok_T_33 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_6 = _address_ok_T_34; // @[Parameters.scala:612:40]
wire [25:0] _GEN_4 = io_in_b_bits_address_0[25:0] ^ 26'h2010000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_35 = {io_in_b_bits_address_0[31:26], _GEN_4}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_36 = {1'h0, _address_ok_T_35}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_37 = _address_ok_T_36 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_38 = _address_ok_T_37; // @[Parameters.scala:137:46]
wire _address_ok_T_39 = _address_ok_T_38 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_7 = _address_ok_T_39; // @[Parameters.scala:612:40]
wire [27:0] _GEN_5 = io_in_b_bits_address_0[27:0] ^ 28'h8000000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_40 = {io_in_b_bits_address_0[31:28], _GEN_5}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_41 = {1'h0, _address_ok_T_40}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_42 = _address_ok_T_41 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_43 = _address_ok_T_42; // @[Parameters.scala:137:46]
wire _address_ok_T_44 = _address_ok_T_43 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_8 = _address_ok_T_44; // @[Parameters.scala:612:40]
wire [27:0] _GEN_6 = io_in_b_bits_address_0[27:0] ^ 28'hC000000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_45 = {io_in_b_bits_address_0[31:28], _GEN_6}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_46 = {1'h0, _address_ok_T_45}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_47 = _address_ok_T_46 & 33'h1FC000000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_48 = _address_ok_T_47; // @[Parameters.scala:137:46]
wire _address_ok_T_49 = _address_ok_T_48 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_9 = _address_ok_T_49; // @[Parameters.scala:612:40]
wire [28:0] _GEN_7 = io_in_b_bits_address_0[28:0] ^ 29'h10020000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_50 = {io_in_b_bits_address_0[31:29], _GEN_7}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_51 = {1'h0, _address_ok_T_50}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_52 = _address_ok_T_51 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_53 = _address_ok_T_52; // @[Parameters.scala:137:46]
wire _address_ok_T_54 = _address_ok_T_53 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_10 = _address_ok_T_54; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_55 = io_in_b_bits_address_0 ^ 32'h80000000; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_56 = {1'h0, _address_ok_T_55}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_57 = _address_ok_T_56 & 33'h1F0000000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_58 = _address_ok_T_57; // @[Parameters.scala:137:46]
wire _address_ok_T_59 = _address_ok_T_58 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_11 = _address_ok_T_59; // @[Parameters.scala:612:40]
wire _address_ok_T_60 = _address_ok_WIRE_0 | _address_ok_WIRE_1; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_61 = _address_ok_T_60 | _address_ok_WIRE_2; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_62 = _address_ok_T_61 | _address_ok_WIRE_3; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_63 = _address_ok_T_62 | _address_ok_WIRE_4; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_64 = _address_ok_T_63 | _address_ok_WIRE_5; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_65 = _address_ok_T_64 | _address_ok_WIRE_6; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_66 = _address_ok_T_65 | _address_ok_WIRE_7; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_67 = _address_ok_T_66 | _address_ok_WIRE_8; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_68 = _address_ok_T_67 | _address_ok_WIRE_9; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_69 = _address_ok_T_68 | _address_ok_WIRE_10; // @[Parameters.scala:612:40, :636:64]
wire address_ok = _address_ok_T_69 | _address_ok_WIRE_11; // @[Parameters.scala:612:40, :636:64]
wire [26:0] _GEN_8 = 27'hFFF << io_in_b_bits_size_0; // @[package.scala:243:71]
wire [26:0] _is_aligned_mask_T_2; // @[package.scala:243:71]
assign _is_aligned_mask_T_2 = _GEN_8; // @[package.scala:243:71]
wire [26:0] _b_first_beats1_decode_T; // @[package.scala:243:71]
assign _b_first_beats1_decode_T = _GEN_8; // @[package.scala:243:71]
wire [11:0] _is_aligned_mask_T_3 = _is_aligned_mask_T_2[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] is_aligned_mask_1 = ~_is_aligned_mask_T_3; // @[package.scala:243:{46,76}]
wire [31:0] _is_aligned_T_1 = {20'h0, io_in_b_bits_address_0[11:0] & is_aligned_mask_1}; // @[package.scala:243:46]
wire is_aligned_1 = _is_aligned_T_1 == 32'h0; // @[Edges.scala:21:{16,24}]
wire [1:0] mask_sizeOH_shiftAmount_1 = _mask_sizeOH_T_3[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_4 = 4'h1 << mask_sizeOH_shiftAmount_1; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_5 = _mask_sizeOH_T_4[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH_1 = {_mask_sizeOH_T_5[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1_1 = io_in_b_bits_size_0 > 4'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size_1 = mask_sizeOH_1[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit_1 = io_in_b_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2_1 = mask_sub_sub_bit_1; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit_1 = ~mask_sub_sub_bit_1; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2_1 = mask_sub_sub_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T_2 = mask_sub_sub_size_1 & mask_sub_sub_0_2_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1_1 = mask_sub_sub_sub_0_1_1 | _mask_sub_sub_acc_T_2; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_3 = mask_sub_sub_size_1 & mask_sub_sub_1_2_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1_1 = mask_sub_sub_sub_0_1_1 | _mask_sub_sub_acc_T_3; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size_1 = mask_sizeOH_1[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit_1 = io_in_b_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit_1 = ~mask_sub_bit_1; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2_1 = mask_sub_sub_0_2_1 & mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_4 = mask_sub_size_1 & mask_sub_0_2_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1_1 = mask_sub_sub_0_1_1 | _mask_sub_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2_1 = mask_sub_sub_0_2_1 & mask_sub_bit_1; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_5 = mask_sub_size_1 & mask_sub_1_2_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1_1 = mask_sub_sub_0_1_1 | _mask_sub_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2_1 = mask_sub_sub_1_2_1 & mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_6 = mask_sub_size_1 & mask_sub_2_2_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1_1 = mask_sub_sub_1_1_1 | _mask_sub_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2_1 = mask_sub_sub_1_2_1 & mask_sub_bit_1; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_7 = mask_sub_size_1 & mask_sub_3_2_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1_1 = mask_sub_sub_1_1_1 | _mask_sub_acc_T_7; // @[Misc.scala:215:{29,38}]
wire mask_size_1 = mask_sizeOH_1[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit_1 = io_in_b_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit_1 = ~mask_bit_1; // @[Misc.scala:210:26, :211:20]
wire mask_eq_8 = mask_sub_0_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_8 = mask_size_1 & mask_eq_8; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_8 = mask_sub_0_1_1 | _mask_acc_T_8; // @[Misc.scala:215:{29,38}]
wire mask_eq_9 = mask_sub_0_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_9 = mask_size_1 & mask_eq_9; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_9 = mask_sub_0_1_1 | _mask_acc_T_9; // @[Misc.scala:215:{29,38}]
wire mask_eq_10 = mask_sub_1_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_10 = mask_size_1 & mask_eq_10; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_10 = mask_sub_1_1_1 | _mask_acc_T_10; // @[Misc.scala:215:{29,38}]
wire mask_eq_11 = mask_sub_1_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_11 = mask_size_1 & mask_eq_11; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_11 = mask_sub_1_1_1 | _mask_acc_T_11; // @[Misc.scala:215:{29,38}]
wire mask_eq_12 = mask_sub_2_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_12 = mask_size_1 & mask_eq_12; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_12 = mask_sub_2_1_1 | _mask_acc_T_12; // @[Misc.scala:215:{29,38}]
wire mask_eq_13 = mask_sub_2_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_13 = mask_size_1 & mask_eq_13; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_13 = mask_sub_2_1_1 | _mask_acc_T_13; // @[Misc.scala:215:{29,38}]
wire mask_eq_14 = mask_sub_3_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_14 = mask_size_1 & mask_eq_14; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_14 = mask_sub_3_1_1 | _mask_acc_T_14; // @[Misc.scala:215:{29,38}]
wire mask_eq_15 = mask_sub_3_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_15 = mask_size_1 & mask_eq_15; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_15 = mask_sub_3_1_1 | _mask_acc_T_15; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo_1 = {mask_acc_9, mask_acc_8}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi_1 = {mask_acc_11, mask_acc_10}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo_1 = {mask_lo_hi_1, mask_lo_lo_1}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo_1 = {mask_acc_13, mask_acc_12}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi_1 = {mask_acc_15, mask_acc_14}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi_1 = {mask_hi_hi_1, mask_hi_lo_1}; // @[Misc.scala:222:10]
wire [7:0] mask_1 = {mask_hi_1, mask_lo_1}; // @[Misc.scala:222:10]
wire _legal_source_T = ~io_in_b_bits_source_0; // @[Monitor.scala:36:7]
wire _legal_source_WIRE_0 = _legal_source_T; // @[Parameters.scala:1138:31]
wire _legal_source_WIRE_1 = _legal_source_T_1; // @[Parameters.scala:1138:31]
wire _legal_source_T_3 = _legal_source_WIRE_1; // @[Mux.scala:30:73]
wire _legal_source_T_4 = _legal_source_T_3; // @[Mux.scala:30:73]
wire _legal_source_WIRE_1_0 = _legal_source_T_4; // @[Mux.scala:30:73]
wire legal_source = _legal_source_WIRE_1_0 == io_in_b_bits_source_0; // @[Mux.scala:30:73]
wire _source_ok_T_4 = ~io_in_c_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_2_0 = _source_ok_T_4; // @[Parameters.scala:1138:31]
wire _source_ok_WIRE_2_1 = _source_ok_T_5; // @[Parameters.scala:1138:31]
wire source_ok_2 = _source_ok_WIRE_2_0 | _source_ok_WIRE_2_1; // @[Parameters.scala:1138:31, :1139:46]
wire [26:0] _GEN_9 = 27'hFFF << io_in_c_bits_size_0; // @[package.scala:243:71]
wire [26:0] _is_aligned_mask_T_4; // @[package.scala:243:71]
assign _is_aligned_mask_T_4 = _GEN_9; // @[package.scala:243:71]
wire [26:0] _c_first_beats1_decode_T; // @[package.scala:243:71]
assign _c_first_beats1_decode_T = _GEN_9; // @[package.scala:243:71]
wire [26:0] _c_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _c_first_beats1_decode_T_3 = _GEN_9; // @[package.scala:243:71]
wire [11:0] _is_aligned_mask_T_5 = _is_aligned_mask_T_4[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] is_aligned_mask_2 = ~_is_aligned_mask_T_5; // @[package.scala:243:{46,76}]
wire [31:0] _is_aligned_T_2 = {20'h0, io_in_c_bits_address_0[11:0] & is_aligned_mask_2}; // @[package.scala:243:46]
wire is_aligned_2 = _is_aligned_T_2 == 32'h0; // @[Edges.scala:21:{16,24}]
wire [32:0] _address_ok_T_71 = {1'h0, _address_ok_T_70}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_72 = _address_ok_T_71 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_73 = _address_ok_T_72; // @[Parameters.scala:137:46]
wire _address_ok_T_74 = _address_ok_T_73 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_0 = _address_ok_T_74; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_75 = {io_in_c_bits_address_0[31:13], io_in_c_bits_address_0[12:0] ^ 13'h1000}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_76 = {1'h0, _address_ok_T_75}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_77 = _address_ok_T_76 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_78 = _address_ok_T_77; // @[Parameters.scala:137:46]
wire _address_ok_T_79 = _address_ok_T_78 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_1 = _address_ok_T_79; // @[Parameters.scala:612:40]
wire [13:0] _GEN_10 = io_in_c_bits_address_0[13:0] ^ 14'h3000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_80 = {io_in_c_bits_address_0[31:14], _GEN_10}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_81 = {1'h0, _address_ok_T_80}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_82 = _address_ok_T_81 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_83 = _address_ok_T_82; // @[Parameters.scala:137:46]
wire _address_ok_T_84 = _address_ok_T_83 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_2 = _address_ok_T_84; // @[Parameters.scala:612:40]
wire [16:0] _GEN_11 = io_in_c_bits_address_0[16:0] ^ 17'h10000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_85 = {io_in_c_bits_address_0[31:17], _GEN_11}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_86 = {1'h0, _address_ok_T_85}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_87 = _address_ok_T_86 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_88 = _address_ok_T_87; // @[Parameters.scala:137:46]
wire _address_ok_T_89 = _address_ok_T_88 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_3 = _address_ok_T_89; // @[Parameters.scala:612:40]
wire [20:0] _GEN_12 = io_in_c_bits_address_0[20:0] ^ 21'h100000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_90 = {io_in_c_bits_address_0[31:21], _GEN_12}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_91 = {1'h0, _address_ok_T_90}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_92 = _address_ok_T_91 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_93 = _address_ok_T_92; // @[Parameters.scala:137:46]
wire _address_ok_T_94 = _address_ok_T_93 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_4 = _address_ok_T_94; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_95 = {io_in_c_bits_address_0[31:21], io_in_c_bits_address_0[20:0] ^ 21'h110000}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_96 = {1'h0, _address_ok_T_95}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_97 = _address_ok_T_96 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_98 = _address_ok_T_97; // @[Parameters.scala:137:46]
wire _address_ok_T_99 = _address_ok_T_98 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_5 = _address_ok_T_99; // @[Parameters.scala:612:40]
wire [25:0] _GEN_13 = io_in_c_bits_address_0[25:0] ^ 26'h2000000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_100 = {io_in_c_bits_address_0[31:26], _GEN_13}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_101 = {1'h0, _address_ok_T_100}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_102 = _address_ok_T_101 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_103 = _address_ok_T_102; // @[Parameters.scala:137:46]
wire _address_ok_T_104 = _address_ok_T_103 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_6 = _address_ok_T_104; // @[Parameters.scala:612:40]
wire [25:0] _GEN_14 = io_in_c_bits_address_0[25:0] ^ 26'h2010000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_105 = {io_in_c_bits_address_0[31:26], _GEN_14}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_106 = {1'h0, _address_ok_T_105}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_107 = _address_ok_T_106 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_108 = _address_ok_T_107; // @[Parameters.scala:137:46]
wire _address_ok_T_109 = _address_ok_T_108 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_7 = _address_ok_T_109; // @[Parameters.scala:612:40]
wire [27:0] _GEN_15 = io_in_c_bits_address_0[27:0] ^ 28'h8000000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_110 = {io_in_c_bits_address_0[31:28], _GEN_15}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_111 = {1'h0, _address_ok_T_110}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_112 = _address_ok_T_111 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_113 = _address_ok_T_112; // @[Parameters.scala:137:46]
wire _address_ok_T_114 = _address_ok_T_113 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_8 = _address_ok_T_114; // @[Parameters.scala:612:40]
wire [27:0] _GEN_16 = io_in_c_bits_address_0[27:0] ^ 28'hC000000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_115 = {io_in_c_bits_address_0[31:28], _GEN_16}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_116 = {1'h0, _address_ok_T_115}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_117 = _address_ok_T_116 & 33'h1FC000000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_118 = _address_ok_T_117; // @[Parameters.scala:137:46]
wire _address_ok_T_119 = _address_ok_T_118 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_9 = _address_ok_T_119; // @[Parameters.scala:612:40]
wire [28:0] _GEN_17 = io_in_c_bits_address_0[28:0] ^ 29'h10020000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_120 = {io_in_c_bits_address_0[31:29], _GEN_17}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_121 = {1'h0, _address_ok_T_120}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_122 = _address_ok_T_121 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_123 = _address_ok_T_122; // @[Parameters.scala:137:46]
wire _address_ok_T_124 = _address_ok_T_123 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_10 = _address_ok_T_124; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_125 = io_in_c_bits_address_0 ^ 32'h80000000; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_126 = {1'h0, _address_ok_T_125}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_127 = _address_ok_T_126 & 33'h1F0000000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_128 = _address_ok_T_127; // @[Parameters.scala:137:46]
wire _address_ok_T_129 = _address_ok_T_128 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_11 = _address_ok_T_129; // @[Parameters.scala:612:40]
wire _address_ok_T_130 = _address_ok_WIRE_1_0 | _address_ok_WIRE_1_1; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_131 = _address_ok_T_130 | _address_ok_WIRE_1_2; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_132 = _address_ok_T_131 | _address_ok_WIRE_1_3; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_133 = _address_ok_T_132 | _address_ok_WIRE_1_4; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_134 = _address_ok_T_133 | _address_ok_WIRE_1_5; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_135 = _address_ok_T_134 | _address_ok_WIRE_1_6; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_136 = _address_ok_T_135 | _address_ok_WIRE_1_7; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_137 = _address_ok_T_136 | _address_ok_WIRE_1_8; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_138 = _address_ok_T_137 | _address_ok_WIRE_1_9; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_139 = _address_ok_T_138 | _address_ok_WIRE_1_10; // @[Parameters.scala:612:40, :636:64]
wire address_ok_1 = _address_ok_T_139 | _address_ok_WIRE_1_11; // @[Parameters.scala:612:40, :636:64]
wire _T_2389 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_2389; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_2389; // @[Decoupled.scala:51:35]
wire [11:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [8:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] a_first_counter; // @[Edges.scala:229:27]
wire [9:0] _a_first_counter1_T = {1'h0, a_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] a_first_counter1 = _a_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [8:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [3:0] size; // @[Monitor.scala:389:22]
reg source; // @[Monitor.scala:390:22]
reg [31:0] address; // @[Monitor.scala:391:22]
wire _T_2463 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_2463; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_2463; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_2463; // @[Decoupled.scala:51:35]
wire _d_first_T_3; // @[Decoupled.scala:51:35]
assign _d_first_T_3 = _T_2463; // @[Decoupled.scala:51:35]
wire [26:0] _GEN_18 = 27'hFFF << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_18; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_18; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_18; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T_9; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_9 = _GEN_18; // @[package.scala:243:71]
wire [11:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_3 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [8:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T = {1'h0, d_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1 = _d_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [3:0] size_1; // @[Monitor.scala:540:22]
reg source_1; // @[Monitor.scala:541:22]
reg [2:0] sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
wire _b_first_T = io_in_b_ready_0 & io_in_b_valid_0; // @[Decoupled.scala:51:35]
wire b_first_done = _b_first_T; // @[Decoupled.scala:51:35]
wire [11:0] _b_first_beats1_decode_T_1 = _b_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _b_first_beats1_decode_T_2 = ~_b_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] b_first_beats1_decode = _b_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire _b_first_beats1_opdata_T = io_in_b_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire b_first_beats1_opdata = ~_b_first_beats1_opdata_T; // @[Edges.scala:97:{28,37}]
reg [8:0] b_first_counter; // @[Edges.scala:229:27]
wire [9:0] _b_first_counter1_T = {1'h0, b_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] b_first_counter1 = _b_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire b_first = b_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _b_first_last_T = b_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire [8:0] _b_first_count_T = ~b_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] _b_first_counter_T = b_first ? 9'h0 : b_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21]
reg [2:0] opcode_2; // @[Monitor.scala:410:22]
reg [1:0] param_2; // @[Monitor.scala:411:22]
reg [3:0] size_2; // @[Monitor.scala:412:22]
reg source_2; // @[Monitor.scala:413:22]
reg [31:0] address_1; // @[Monitor.scala:414:22]
wire _T_2460 = io_in_c_ready_0 & io_in_c_valid_0; // @[Decoupled.scala:51:35]
wire _c_first_T; // @[Decoupled.scala:51:35]
assign _c_first_T = _T_2460; // @[Decoupled.scala:51:35]
wire _c_first_T_1; // @[Decoupled.scala:51:35]
assign _c_first_T_1 = _T_2460; // @[Decoupled.scala:51:35]
wire [11:0] _c_first_beats1_decode_T_1 = _c_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _c_first_beats1_decode_T_2 = ~_c_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] c_first_beats1_decode = _c_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire c_first_beats1_opdata = io_in_c_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire c_first_beats1_opdata_1 = io_in_c_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [8:0] c_first_beats1 = c_first_beats1_opdata ? c_first_beats1_decode : 9'h0; // @[Edges.scala:102:36, :220:59, :221:14]
reg [8:0] c_first_counter; // @[Edges.scala:229:27]
wire [9:0] _c_first_counter1_T = {1'h0, c_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] c_first_counter1 = _c_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire c_first = c_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _c_first_last_T = c_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _c_first_last_T_1 = c_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire c_first_last = _c_first_last_T | _c_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire c_first_done = c_first_last & _c_first_T; // @[Decoupled.scala:51:35]
wire [8:0] _c_first_count_T = ~c_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] c_first_count = c_first_beats1 & _c_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _c_first_counter_T = c_first ? c_first_beats1 : c_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_3; // @[Monitor.scala:515:22]
reg [2:0] param_3; // @[Monitor.scala:516:22]
reg [3:0] size_3; // @[Monitor.scala:517:22]
reg source_3; // @[Monitor.scala:518:22]
reg [31:0] address_2; // @[Monitor.scala:519:22]
reg [1:0] inflight; // @[Monitor.scala:614:27]
reg [7:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [15:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [11:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [8:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] a_first_counter1_1 = _a_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [11:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire [8:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1_1 = _d_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [1:0] a_set; // @[Monitor.scala:626:34]
wire [1:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [7:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [15:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [3:0] _GEN_19 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [3:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_19; // @[Monitor.scala:637:69]
wire [3:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_19; // @[Monitor.scala:637:69, :680:101]
wire [3:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_19; // @[Monitor.scala:637:69, :749:69]
wire [3:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_19; // @[Monitor.scala:637:69, :790:101]
wire [7:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [15:0] _a_opcode_lookup_T_6 = {8'h0, _a_opcode_lookup_T_1 & 8'hF}; // @[Monitor.scala:637:{44,97}]
wire [15:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [7:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [3:0] _GEN_20 = {io_in_d_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :641:65]
wire [3:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_20; // @[Monitor.scala:641:65]
wire [3:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_20; // @[Monitor.scala:641:65, :681:99]
wire [3:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_20; // @[Monitor.scala:641:65, :750:67]
wire [3:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_20; // @[Monitor.scala:641:65, :791:99]
wire [15:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [15:0] _a_size_lookup_T_6 = _a_size_lookup_T_1 & 16'hFF; // @[Monitor.scala:641:{40,91}]
wire [15:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[15:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[7:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [4:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [1:0] _GEN_21 = {1'h0, io_in_a_bits_source_0}; // @[OneHot.scala:58:35]
wire [1:0] _GEN_22 = 2'h1 << _GEN_21; // @[OneHot.scala:58:35]
wire [1:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_22; // @[OneHot.scala:58:35]
wire [1:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_22; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T : 2'h0; // @[OneHot.scala:58:35]
wire _T_2315 = _T_2389 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_2315 ? _a_set_T : 2'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_2315 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [4:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [4:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_2315 ? _a_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [3:0] _a_opcodes_set_T = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [18:0] _a_opcodes_set_T_1 = {15'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_2315 ? _a_opcodes_set_T_1[7:0] : 8'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [3:0] _a_sizes_set_T = {io_in_a_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :660:77]
wire [19:0] _a_sizes_set_T_1 = {15'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :660:{52,77}]
assign a_sizes_set = _T_2315 ? _a_sizes_set_T_1[15:0] : 16'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [1:0] d_clr; // @[Monitor.scala:664:34]
wire [1:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [7:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [15:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_23 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_23; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_23; // @[Monitor.scala:673:46, :783:46]
wire _T_2361 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [1:0] _GEN_24 = {1'h0, io_in_d_bits_source_0}; // @[OneHot.scala:58:35]
wire [1:0] _GEN_25 = 2'h1 << _GEN_24; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_25; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_25; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_25; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_25; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_2361 & ~d_release_ack ? _d_clr_wo_ready_T : 2'h0; // @[OneHot.scala:58:35]
wire _T_2330 = _T_2463 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_2330 ? _d_clr_T : 2'h0; // @[OneHot.scala:58:35]
wire [30:0] _d_opcodes_clr_T_5 = 31'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_2330 ? _d_opcodes_clr_T_5[7:0] : 8'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [30:0] _d_sizes_clr_T_5 = 31'hFF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_2330 ? _d_sizes_clr_T_5[15:0] : 16'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [1:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [1:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [1:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [7:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [7:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [7:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [15:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [15:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [15:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [1:0] inflight_1; // @[Monitor.scala:726:35]
reg [7:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
reg [15:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [11:0] _c_first_beats1_decode_T_4 = _c_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _c_first_beats1_decode_T_5 = ~_c_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] c_first_beats1_decode_1 = _c_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire [8:0] c_first_beats1_1 = c_first_beats1_opdata_1 ? c_first_beats1_decode_1 : 9'h0; // @[Edges.scala:102:36, :220:59, :221:14]
reg [8:0] c_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] _c_first_counter1_T_1 = {1'h0, c_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] c_first_counter1_1 = _c_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire c_first_1 = c_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _c_first_last_T_2 = c_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _c_first_last_T_3 = c_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire c_first_last_1 = _c_first_last_T_2 | _c_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire c_first_done_1 = c_first_last_1 & _c_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] _c_first_count_T_1 = ~c_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] c_first_count_1 = c_first_beats1_1 & _c_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _c_first_counter_T_1 = c_first_1 ? c_first_beats1_1 : c_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [11:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[11:3]; // @[package.scala:243:46]
wire [8:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1_2 = _d_first_counter1_T_2[8:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [1:0] c_set; // @[Monitor.scala:738:34]
wire [1:0] c_set_wo_ready; // @[Monitor.scala:739:34]
wire [7:0] c_opcodes_set; // @[Monitor.scala:740:34]
wire [15:0] c_sizes_set; // @[Monitor.scala:741:34]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [7:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [7:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [15:0] _c_opcode_lookup_T_6 = {8'h0, _c_opcode_lookup_T_1 & 8'hF}; // @[Monitor.scala:749:{44,97}]
wire [15:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [15:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [15:0] _c_size_lookup_T_6 = _c_size_lookup_T_1 & 16'hFF; // @[Monitor.scala:750:{42,93}]
wire [15:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[15:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[7:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [3:0] c_opcodes_set_interm; // @[Monitor.scala:754:40]
wire [4:0] c_sizes_set_interm; // @[Monitor.scala:755:40]
wire _same_cycle_resp_T_3 = io_in_c_valid_0 & c_first_1; // @[Monitor.scala:36:7, :759:26, :795:44]
wire _same_cycle_resp_T_4 = io_in_c_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _same_cycle_resp_T_5 = io_in_c_bits_opcode_0[1]; // @[Monitor.scala:36:7]
wire [1:0] _GEN_26 = {1'h0, io_in_c_bits_source_0}; // @[OneHot.scala:58:35]
wire [1:0] _GEN_27 = 2'h1 << _GEN_26; // @[OneHot.scala:58:35]
wire [1:0] _c_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _c_set_wo_ready_T = _GEN_27; // @[OneHot.scala:58:35]
wire [1:0] _c_set_T; // @[OneHot.scala:58:35]
assign _c_set_T = _GEN_27; // @[OneHot.scala:58:35]
assign c_set_wo_ready = _same_cycle_resp_T_3 & _same_cycle_resp_T_4 & _same_cycle_resp_T_5 ? _c_set_wo_ready_T : 2'h0; // @[OneHot.scala:58:35]
wire _T_2402 = _T_2460 & c_first_1 & _same_cycle_resp_T_4 & _same_cycle_resp_T_5; // @[Decoupled.scala:51:35]
assign c_set = _T_2402 ? _c_set_T : 2'h0; // @[OneHot.scala:58:35]
wire [3:0] _c_opcodes_set_interm_T = {io_in_c_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :765:53]
wire [3:0] _c_opcodes_set_interm_T_1 = {_c_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:765:{53,61}]
assign c_opcodes_set_interm = _T_2402 ? _c_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:754:40, :763:{25,36,70}, :765:{28,61}]
wire [4:0] _c_sizes_set_interm_T = {io_in_c_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :766:51]
wire [4:0] _c_sizes_set_interm_T_1 = {_c_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:766:{51,59}]
assign c_sizes_set_interm = _T_2402 ? _c_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:755:40, :763:{25,36,70}, :766:{28,59}]
wire [3:0] _c_opcodes_set_T = {1'h0, io_in_c_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :767:79]
wire [18:0] _c_opcodes_set_T_1 = {15'h0, c_opcodes_set_interm} << _c_opcodes_set_T; // @[Monitor.scala:754:40, :767:{54,79}]
assign c_opcodes_set = _T_2402 ? _c_opcodes_set_T_1[7:0] : 8'h0; // @[Monitor.scala:740:34, :763:{25,36,70}, :767:{28,54}]
wire [3:0] _c_sizes_set_T = {io_in_c_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :768:77]
wire [19:0] _c_sizes_set_T_1 = {15'h0, c_sizes_set_interm} << _c_sizes_set_T; // @[Monitor.scala:755:40, :768:{52,77}]
assign c_sizes_set = _T_2402 ? _c_sizes_set_T_1[15:0] : 16'h0; // @[Monitor.scala:741:34, :763:{25,36,70}, :768:{28,52}]
wire _c_probe_ack_T = io_in_c_bits_opcode_0 == 3'h4; // @[Monitor.scala:36:7, :772:47]
wire _c_probe_ack_T_1 = io_in_c_bits_opcode_0 == 3'h5; // @[Monitor.scala:36:7, :772:95]
wire c_probe_ack = _c_probe_ack_T | _c_probe_ack_T_1; // @[Monitor.scala:772:{47,71,95}]
wire [1:0] d_clr_1; // @[Monitor.scala:774:34]
wire [1:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [7:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [15:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_2433 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_2433 & d_release_ack_1 ? _d_clr_wo_ready_T_1 : 2'h0; // @[OneHot.scala:58:35]
wire _T_2415 = _T_2463 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_2415 ? _d_clr_T_1 : 2'h0; // @[OneHot.scala:58:35]
wire [30:0] _d_opcodes_clr_T_11 = 31'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_2415 ? _d_opcodes_clr_T_11[7:0] : 8'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [30:0] _d_sizes_clr_T_11 = 31'hFF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_2415 ? _d_sizes_clr_T_11[15:0] : 16'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_6 = _same_cycle_resp_T_4 & _same_cycle_resp_T_5; // @[Edges.scala:68:{36,40,51}]
wire _same_cycle_resp_T_7 = _same_cycle_resp_T_3 & _same_cycle_resp_T_6; // @[Monitor.scala:795:{44,55}]
wire _same_cycle_resp_T_8 = io_in_c_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :795:113]
wire same_cycle_resp_1 = _same_cycle_resp_T_7 & _same_cycle_resp_T_8; // @[Monitor.scala:795:{55,88,113}]
wire [1:0] _inflight_T_3 = inflight_1 | c_set; // @[Monitor.scala:726:35, :738:34, :814:35]
wire [1:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [1:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [7:0] _inflight_opcodes_T_3 = inflight_opcodes_1 | c_opcodes_set; // @[Monitor.scala:727:35, :740:34, :815:43]
wire [7:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [7:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [15:0] _inflight_sizes_T_3 = inflight_sizes_1 | c_sizes_set; // @[Monitor.scala:728:35, :741:34, :816:41]
wire [15:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [15:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
wire [32:0] _watchdog_T_2 = {1'h0, watchdog_1} + 33'h1; // @[Monitor.scala:818:27, :823:26]
wire [31:0] _watchdog_T_3 = _watchdog_T_2[31:0]; // @[Monitor.scala:823:26]
reg [7:0] inflight_2; // @[Monitor.scala:828:27]
wire [11:0] _d_first_beats1_decode_T_10 = _d_first_beats1_decode_T_9[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_11 = ~_d_first_beats1_decode_T_10; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode_3 = _d_first_beats1_decode_T_11[11:3]; // @[package.scala:243:46]
wire [8:0] d_first_beats1_3 = d_first_beats1_opdata_3 ? d_first_beats1_decode_3 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter_3; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T_3 = {1'h0, d_first_counter_3} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1_3 = _d_first_counter1_T_3[8:0]; // @[Edges.scala:230:28]
wire d_first_3 = d_first_counter_3 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_6 = d_first_counter_3 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_7 = d_first_beats1_3 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_3 = _d_first_last_T_6 | _d_first_last_T_7; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_3 = d_first_last_3 & _d_first_T_3; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T_3 = ~d_first_counter1_3; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count_3 = d_first_beats1_3 & _d_first_count_T_3; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T_3 = d_first_3 ? d_first_beats1_3 : d_first_counter1_3; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [7:0] d_set; // @[Monitor.scala:833:25]
wire _T_2469 = _T_2463 & d_first_3 & io_in_d_bits_opcode_0[2] & ~(io_in_d_bits_opcode_0[1]); // @[Decoupled.scala:51:35]
wire [7:0] _GEN_28 = {5'h0, io_in_d_bits_sink_0}; // @[OneHot.scala:58:35]
wire [7:0] _d_set_T = 8'h1 << _GEN_28; // @[OneHot.scala:58:35]
assign d_set = _T_2469 ? _d_set_T : 8'h0; // @[OneHot.scala:58:35]
wire [7:0] e_clr; // @[Monitor.scala:839:25]
wire _T_2478 = io_in_e_ready_0 & io_in_e_valid_0; // @[Decoupled.scala:51:35]
wire [7:0] _GEN_29 = {5'h0, io_in_e_bits_sink_0}; // @[OneHot.scala:58:35]
wire [7:0] _e_clr_T = 8'h1 << _GEN_29; // @[OneHot.scala:58:35]
assign e_clr = _T_2478 ? _e_clr_T : 8'h0; // @[OneHot.scala:58:35] |
Generate the Verilog code corresponding to the following Chisel files.
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
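// Illustrative usage sketch (not part of the original file): gate a transfer on several
// ready/valid terms while excluding the signal currently being driven, which avoids a
// combinational loop.  `in`, `out`, and `busy` are hypothetical signals.
//   val helper = DecoupledHelper(in.valid, out.ready, !busy)
//   out.valid := helper.fire(out.ready)   // AND of all terms except out.ready
//   in.ready  := helper.fire(in.valid)    // AND of all terms except in.valid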
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
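// Illustrative usage sketch (not part of the original file): select a pair of control
// values keyed by an opcode, falling back to the default when no key matches.
// `cmd`, `io.a`, and `io.b` are hypothetical signals.
//   val (wen, wdata) = MuxTLookup(cmd, (false.B, 0.U), Seq(
//     1.U -> (true.B, io.a),
//     2.U -> (true.B, io.b)))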
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
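// Illustrative elaboration sketch (not part of the original file), assuming an 8-byte
// data bus: MaskGen turns the low address bits and the log2 transfer size into a
// per-byte-lane strobe.  `MaskGenExample` is a hypothetical wrapper used only to show
// the call signature.
class MaskGenExample extends Module {
  val io = IO(new Bundle {
    val addr_lo = Input(UInt(3.W))   // address bits below the 8-byte beat boundary
    val lgSize  = Input(UInt(2.W))   // log2 of the access size in bytes (0 to 3)
    val strb    = Output(UInt(8.W))  // one enable bit per byte lane
  })
  io.strb := MaskGen(io.addr_lo, io.lgSize, beatBytes = 8)
}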
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
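    // Illustrative examples (not part of the original file):
    //   "Chip Top".underscore == "chip_top"
    //   "Chip Top".kebab      == "chip-top"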
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
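    // Illustrative examples (not part of the original file), with n = 6:
    //   4.U.addWrap(3.U, 6)   // yields 1.U, i.e. (4 + 3) % 6
    //   1.U.subWrap(3.U, 6)   // yields 4.U, i.e. (1 - 3) mod 6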
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
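  // Illustrative examples (not part of the original file), for an 8-bit input:
  //   leftOR ("b00000100".U(8.W))   // yields "b11111100".U: 1s smear toward the MSB
  //   rightOR("b00000100".U(8.W))   // yields "b00000111".U: 1s smear toward the LSB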
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Replacement.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import freechips.rocketchip.util.property.cover
abstract class ReplacementPolicy {
def nBits: Int
def perSet: Boolean
def way: UInt
def miss: Unit
def hit: Unit
def access(touch_way: UInt): Unit
def access(touch_ways: Seq[Valid[UInt]]): Unit
def state_read: UInt
def get_next_state(state: UInt, touch_way: UInt): UInt
def get_next_state(state: UInt, touch_ways: Seq[Valid[UInt]]): UInt = {
touch_ways.foldLeft(state)((prev, touch_way) => Mux(touch_way.valid, get_next_state(prev, touch_way.bits), prev))
}
def get_replace_way(state: UInt): UInt
}
object ReplacementPolicy {
def fromString(s: String, n_ways: Int): ReplacementPolicy = s.toLowerCase match {
case "random" => new RandomReplacement(n_ways)
case "lru" => new TrueLRU(n_ways)
case "plru" => new PseudoLRU(n_ways)
case t => throw new IllegalArgumentException(s"unknown Replacement Policy type $t")
}
}
class RandomReplacement(n_ways: Int) extends ReplacementPolicy {
private val replace = Wire(Bool())
replace := false.B
def nBits = 16
def perSet = false
private val lfsr = LFSR(nBits, replace)
def state_read = WireDefault(lfsr)
def way = Random(n_ways, lfsr)
def miss = replace := true.B
def hit = {}
def access(touch_way: UInt) = {}
def access(touch_ways: Seq[Valid[UInt]]) = {}
def get_next_state(state: UInt, touch_way: UInt) = 0.U //DontCare
def get_replace_way(state: UInt) = way
}
abstract class SeqReplacementPolicy {
def access(set: UInt): Unit
def update(valid: Bool, hit: Bool, set: UInt, way: UInt): Unit
def way: UInt
}
abstract class SetAssocReplacementPolicy {
def access(set: UInt, touch_way: UInt): Unit
def access(sets: Seq[UInt], touch_ways: Seq[Valid[UInt]]): Unit
def way(set: UInt): UInt
}
class SeqRandom(n_ways: Int) extends SeqReplacementPolicy {
val logic = new RandomReplacement(n_ways)
def access(set: UInt) = { }
def update(valid: Bool, hit: Bool, set: UInt, way: UInt) = {
when (valid && !hit) { logic.miss }
}
def way = logic.way
}
class TrueLRU(n_ways: Int) extends ReplacementPolicy {
// True LRU replacement policy, using a triangular matrix to track which ways are more recently used than others.
// The matrix is packed into a single UInt (or Bits). Example 4-way (6-bits):
// [5] - 3 more recent than 2
// [4] - 3 more recent than 1
// [3] - 2 more recent than 1
// [2] - 3 more recent than 0
// [1] - 2 more recent than 0
// [0] - 1 more recent than 0
def nBits = (n_ways * (n_ways-1)) / 2
def perSet = true
private val state_reg = RegInit(0.U(nBits.W))
def state_read = WireDefault(state_reg)
private def extractMRUVec(state: UInt): Seq[UInt] = {
// Extract per-way information about which higher-indexed ways are more recently used
val moreRecentVec = Wire(Vec(n_ways-1, UInt(n_ways.W)))
var lsb = 0
for (i <- 0 until n_ways-1) {
moreRecentVec(i) := Cat(state(lsb+n_ways-i-2,lsb), 0.U((i+1).W))
lsb = lsb + (n_ways - i - 1)
}
moreRecentVec
}
def get_next_state(state: UInt, touch_way: UInt): UInt = {
val nextState = Wire(Vec(n_ways-1, UInt(n_ways.W)))
val moreRecentVec = extractMRUVec(state) // reconstruct lower triangular matrix
val wayDec = UIntToOH(touch_way, n_ways)
// Compute next value of triangular matrix
// set the touched way as more recent than every other way
nextState.zipWithIndex.map { case (e, i) =>
e := Mux(i.U === touch_way, 0.U(n_ways.W), moreRecentVec(i) | wayDec)
}
nextState.zipWithIndex.tail.foldLeft((nextState.head.apply(n_ways-1,1),0)) { case ((pe,pi),(ce,ci)) => (Cat(ce.apply(n_ways-1,ci+1), pe), ci) }._1
}
def access(touch_way: UInt): Unit = {
state_reg := get_next_state(state_reg, touch_way)
}
def access(touch_ways: Seq[Valid[UInt]]): Unit = {
when (touch_ways.map(_.valid).orR) {
state_reg := get_next_state(state_reg, touch_ways)
}
for (i <- 1 until touch_ways.size) {
cover(PopCount(touch_ways.map(_.valid)) === i.U, s"LRU_UpdateCount$i", s"LRU Update $i simultaneous")
}
}
def get_replace_way(state: UInt): UInt = {
val moreRecentVec = extractMRUVec(state) // reconstruct lower triangular matrix
// For each way, determine if all other ways are more recent
val mruWayDec = (0 until n_ways).map { i =>
val upperMoreRecent = (if (i == n_ways-1) true.B else moreRecentVec(i).apply(n_ways-1,i+1).andR)
val lowerMoreRecent = (if (i == 0) true.B else moreRecentVec.map(e => !e(i)).reduce(_ && _))
upperMoreRecent && lowerMoreRecent
}
OHToUInt(mruWayDec)
}
def way = get_replace_way(state_reg)
def miss = access(way)
def hit = {}
@deprecated("replace 'replace' with 'way' from abstract class ReplacementPolicy","Rocket Chip 2020.05")
def replace: UInt = way
}
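// Illustrative usage sketch (not part of the original file): a 4-way TrueLRU tracker
// driven by a Valid touch port; `way` reports the current least-recently-used victim.
// `TrueLRUExample` is a hypothetical wrapper used only for illustration.
class TrueLRUExample extends Module {
  val io = IO(new Bundle {
    val touch  = Flipped(Valid(UInt(2.W)))  // way that was just accessed
    val victim = Output(UInt(2.W))          // current LRU way to replace
  })
  private val lru = new TrueLRU(4)
  when (io.touch.valid) { lru.access(io.touch.bits) }
  io.victim := lru.way
}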
class PseudoLRU(n_ways: Int) extends ReplacementPolicy {
// Pseudo-LRU tree algorithm: https://en.wikipedia.org/wiki/Pseudo-LRU#Tree-PLRU
//
//
// - bits storage example for 4-way PLRU binary tree:
// bit[2]: ways 3+2 older than ways 1+0
// / \
// bit[1]: way 3 older than way 2 bit[0]: way 1 older than way 0
//
//
// - bits storage example for 3-way PLRU binary tree:
// bit[1]: way 2 older than ways 1+0
// \
// bit[0]: way 1 older than way 0
//
//
// - bits storage example for 8-way PLRU binary tree:
// bit[6]: ways 7-4 older than ways 3-0
// / \
// bit[5]: ways 7+6 > 5+4 bit[2]: ways 3+2 > 1+0
// / \ / \
// bit[4]: way 7>6 bit[3]: way 5>4 bit[1]: way 3>2 bit[0]: way 1>0
def nBits = n_ways - 1
def perSet = true
private val state_reg = if (nBits == 0) Reg(UInt(0.W)) else RegInit(0.U(nBits.W))
def state_read = WireDefault(state_reg)
def access(touch_way: UInt): Unit = {
state_reg := get_next_state(state_reg, touch_way)
}
def access(touch_ways: Seq[Valid[UInt]]): Unit = {
when (touch_ways.map(_.valid).orR) {
state_reg := get_next_state(state_reg, touch_ways)
}
for (i <- 1 until touch_ways.size) {
cover(PopCount(touch_ways.map(_.valid)) === i.U, s"PLRU_UpdateCount$i", s"PLRU Update $i simultaneous")
}
}
/** @param state state_reg bits for this sub-tree
* @param touch_way touched way encoded value bits for this sub-tree
* @param tree_nways number of ways in this sub-tree
*/
def get_next_state(state: UInt, touch_way: UInt, tree_nways: Int): UInt = {
require(state.getWidth == (tree_nways-1), s"wrong state bits width ${state.getWidth} for $tree_nways ways")
require(touch_way.getWidth == (log2Ceil(tree_nways) max 1), s"wrong encoded way width ${touch_way.getWidth} for $tree_nways ways")
if (tree_nways > 2) {
// we are at a branching node in the tree, so recurse
val right_nways: Int = 1 << (log2Ceil(tree_nways) - 1) // number of ways in the right sub-tree
val left_nways: Int = tree_nways - right_nways // number of ways in the left sub-tree
val set_left_older = !touch_way(log2Ceil(tree_nways)-1)
val left_subtree_state = state.extract(tree_nways-3, right_nways-1)
val right_subtree_state = state(right_nways-2, 0)
if (left_nways > 1) {
// we are at a branching node in the tree with both left and right sub-trees, so recurse both sub-trees
Cat(set_left_older,
Mux(set_left_older,
left_subtree_state, // if setting left sub-tree as older, do NOT recurse into left sub-tree
get_next_state(left_subtree_state, touch_way.extract(log2Ceil(left_nways)-1,0), left_nways)), // recurse left if newer
Mux(set_left_older,
get_next_state(right_subtree_state, touch_way(log2Ceil(right_nways)-1,0), right_nways), // recurse right if newer
right_subtree_state)) // if setting right sub-tree as older, do NOT recurse into right sub-tree
} else {
// we are at a branching node in the tree with only a right sub-tree, so recurse only right sub-tree
Cat(set_left_older,
Mux(set_left_older,
get_next_state(right_subtree_state, touch_way(log2Ceil(right_nways)-1,0), right_nways), // recurse right if newer
right_subtree_state)) // if setting right sub-tree as older, do NOT recurse into right sub-tree
}
} else if (tree_nways == 2) {
// we are at a leaf node at the end of the tree, so set the single state bit opposite of the lsb of the touched way encoded value
!touch_way(0)
} else { // tree_nways <= 1
// we are at an empty node in an empty tree for 1 way, so return single zero bit for Chisel (no zero-width wires)
0.U(1.W)
}
}
def get_next_state(state: UInt, touch_way: UInt): UInt = {
val touch_way_sized = if (touch_way.getWidth < log2Ceil(n_ways)) touch_way.padTo (log2Ceil(n_ways))
else touch_way.extract(log2Ceil(n_ways)-1,0)
get_next_state(state, touch_way_sized, n_ways)
}
/** @param state state_reg bits for this sub-tree
* @param tree_nways number of ways in this sub-tree
*/
def get_replace_way(state: UInt, tree_nways: Int): UInt = {
require(state.getWidth == (tree_nways-1), s"wrong state bits width ${state.getWidth} for $tree_nways ways")
// this algorithm recursively descends the binary tree, filling in the way-to-replace encoded value from msb to lsb
if (tree_nways > 2) {
// we are at a branching node in the tree, so recurse
val right_nways: Int = 1 << (log2Ceil(tree_nways) - 1) // number of ways in the right sub-tree
val left_nways: Int = tree_nways - right_nways // number of ways in the left sub-tree
val left_subtree_older = state(tree_nways-2)
val left_subtree_state = state.extract(tree_nways-3, right_nways-1)
val right_subtree_state = state(right_nways-2, 0)
if (left_nways > 1) {
// we are at a branching node in the tree with both left and right sub-trees, so recurse both sub-trees
Cat(left_subtree_older, // return the top state bit (current tree node) as msb of the way-to-replace encoded value
Mux(left_subtree_older, // if left sub-tree is older, recurse left, else recurse right
get_replace_way(left_subtree_state, left_nways), // recurse left
get_replace_way(right_subtree_state, right_nways))) // recurse right
} else {
// we are at a branching node in the tree with only a right sub-tree, so recurse only right sub-tree
Cat(left_subtree_older, // return the top state bit (current tree node) as msb of the way-to-replace encoded value
Mux(left_subtree_older, // if left sub-tree is older, return and do not recurse right
0.U(1.W),
get_replace_way(right_subtree_state, right_nways))) // recurse right
}
} else if (tree_nways == 2) {
// we are at a leaf node at the end of the tree, so just return the single state bit as lsb of the way-to-replace encoded value
state(0)
} else { // tree_nways <= 1
// we are at an empty node in an unbalanced tree for non-power-of-2 ways, so return single zero bit as lsb of the way-to-replace encoded value
0.U(1.W)
}
}
def get_replace_way(state: UInt): UInt = get_replace_way(state, n_ways)
def way = get_replace_way(state_reg)
def miss = access(way)
def hit = {}
}
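// Illustrative walkthrough (not part of the original file) of the 4-way diagram above,
// with the 3-bit state written bit[2]:bit[1]:bit[0]:
//   get_replace_way("b000".U) is way 0   (bit[2]=0 picks ways 1+0, bit[0]=0 picks way 0)
//   get_replace_way("b110".U) is way 3   (bit[2]=1 picks ways 3+2, bit[1]=1 picks way 3)
//   get_next_state("b000".U, touch_way = 0.U) is "b101".U
//     (touching way 0 marks ways 3+2 as older via bit[2] and way 1 as older than way 0 via bit[0])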
class SeqPLRU(n_sets: Int, n_ways: Int) extends SeqReplacementPolicy {
val logic = new PseudoLRU(n_ways)
val state = SyncReadMem(n_sets, UInt(logic.nBits.W))
val current_state = Wire(UInt(logic.nBits.W))
val next_state = Wire(UInt(logic.nBits.W))
val plru_way = logic.get_replace_way(current_state)
def access(set: UInt) = {
current_state := state.read(set)
}
def update(valid: Bool, hit: Bool, set: UInt, way: UInt) = {
val update_way = Mux(hit, way, plru_way)
next_state := logic.get_next_state(current_state, update_way)
when (valid) { state.write(set, next_state) }
}
def way = plru_way
}
class SetAssocLRU(n_sets: Int, n_ways: Int, policy: String) extends SetAssocReplacementPolicy {
val logic = policy.toLowerCase match {
case "plru" => new PseudoLRU(n_ways)
case "lru" => new TrueLRU(n_ways)
case t => throw new IllegalArgumentException(s"unknown Replacement Policy type $t")
}
val state_vec =
if (logic.nBits == 0) Reg(Vec(n_sets, UInt(logic.nBits.W))) // Work around elaboration error on following line
else RegInit(VecInit(Seq.fill(n_sets)(0.U(logic.nBits.W))))
def access(set: UInt, touch_way: UInt) = {
state_vec(set) := logic.get_next_state(state_vec(set), touch_way)
}
def access(sets: Seq[UInt], touch_ways: Seq[Valid[UInt]]) = {
require(sets.size == touch_ways.size, "internal consistency check: should be same number of simultaneous updates for sets and touch_ways")
for (set <- 0 until n_sets) {
val set_touch_ways = (sets zip touch_ways).map { case (touch_set, touch_way) =>
Pipe(touch_way.valid && (touch_set === set.U), touch_way.bits, 0)}
when (set_touch_ways.map(_.valid).orR) {
state_vec(set) := logic.get_next_state(state_vec(set), set_touch_ways)
}
}
}
def way(set: UInt) = logic.get_replace_way(state_vec(set))
}
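// Minimal sketch (assumed example; `SetAssocLRUExample` is not part of the original
// file): a per-set replacement tracker for an 8-set, 4-way structure using the "plru"
// policy string accepted by the constructor above.
class SetAssocLRUExample extends Module {
  val io = IO(new Bundle {
    val set    = Input(UInt(3.W))          // which of the 8 sets was accessed
    val touch  = Flipped(Valid(UInt(2.W))) // way within that set that was used
    val victim = Output(UInt(2.W))         // replacement candidate for that set
  })
  val repl = new SetAssocLRU(8, 4, "plru")
  io.victim := repl.way(io.set)                                 // read-only victim lookup
  when (io.touch.valid) { repl.access(io.set, io.touch.bits) }  // single-touch update
}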
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class PLRUTest(n_ways: Int, timeout: Int = 500) extends UnitTest(timeout) {
val plru = new PseudoLRU(n_ways)
// step
io.finished := RegNext(true.B, false.B)
val get_replace_ways = (0 until (1 << (n_ways-1))).map(state =>
plru.get_replace_way(state = state.U((n_ways-1).W)))
val get_next_states = (0 until (1 << (n_ways-1))).map(state => (0 until n_ways).map(way =>
plru.get_next_state (state = state.U((n_ways-1).W), touch_way = way.U(log2Ceil(n_ways).W))))
n_ways match {
case 2 => {
assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0))
assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1))
assert(get_next_states(0)(0) === 1.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=1 actual=%d", get_next_states(0)(0))
assert(get_next_states(0)(1) === 0.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=0 actual=%d", get_next_states(0)(1))
assert(get_next_states(1)(0) === 1.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=1 actual=%d", get_next_states(1)(0))
assert(get_next_states(1)(1) === 0.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=0 actual=%d", get_next_states(1)(1))
}
case 3 => {
assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0))
assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1))
assert(get_replace_ways(2) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=2: expected=2 actual=%d", get_replace_ways(2))
assert(get_replace_ways(3) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=3: expected=2 actual=%d", get_replace_ways(3))
assert(get_next_states(0)(0) === 3.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=3 actual=%d", get_next_states(0)(0))
assert(get_next_states(0)(1) === 2.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=2 actual=%d", get_next_states(0)(1))
assert(get_next_states(0)(2) === 0.U(plru.nBits.W), s"get_next_state state=0 way=2: expected=0 actual=%d", get_next_states(0)(2))
assert(get_next_states(1)(0) === 3.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=3 actual=%d", get_next_states(1)(0))
assert(get_next_states(1)(1) === 2.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=2 actual=%d", get_next_states(1)(1))
assert(get_next_states(1)(2) === 1.U(plru.nBits.W), s"get_next_state state=1 way=2: expected=1 actual=%d", get_next_states(1)(2))
assert(get_next_states(2)(0) === 3.U(plru.nBits.W), s"get_next_state state=2 way=0: expected=3 actual=%d", get_next_states(2)(0))
assert(get_next_states(2)(1) === 2.U(plru.nBits.W), s"get_next_state state=2 way=1: expected=2 actual=%d", get_next_states(2)(1))
assert(get_next_states(2)(2) === 0.U(plru.nBits.W), s"get_next_state state=2 way=2: expected=0 actual=%d", get_next_states(2)(2))
assert(get_next_states(3)(0) === 3.U(plru.nBits.W), s"get_next_state state=3 way=0: expected=3 actual=%d", get_next_states(3)(0))
assert(get_next_states(3)(1) === 2.U(plru.nBits.W), s"get_next_state state=3 way=1: expected=2 actual=%d", get_next_states(3)(1))
assert(get_next_states(3)(2) === 1.U(plru.nBits.W), s"get_next_state state=3 way=2: expected=1 actual=%d", get_next_states(3)(2))
}
case 4 => {
assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0))
assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1))
assert(get_replace_ways(2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=2: expected=0 actual=%d", get_replace_ways(2))
assert(get_replace_ways(3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=3: expected=1 actual=%d", get_replace_ways(3))
assert(get_replace_ways(4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=4: expected=2 actual=%d", get_replace_ways(4))
assert(get_replace_ways(5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=5: expected=2 actual=%d", get_replace_ways(5))
assert(get_replace_ways(6) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=6: expected=3 actual=%d", get_replace_ways(6))
assert(get_replace_ways(7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=7: expected=3 actual=%d", get_replace_ways(7))
assert(get_next_states(0)(0) === 5.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=5 actual=%d", get_next_states(0)(0))
assert(get_next_states(0)(1) === 4.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=4 actual=%d", get_next_states(0)(1))
assert(get_next_states(0)(2) === 2.U(plru.nBits.W), s"get_next_state state=0 way=2: expected=2 actual=%d", get_next_states(0)(2))
assert(get_next_states(0)(3) === 0.U(plru.nBits.W), s"get_next_state state=0 way=3: expected=0 actual=%d", get_next_states(0)(3))
assert(get_next_states(1)(0) === 5.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=5 actual=%d", get_next_states(1)(0))
assert(get_next_states(1)(1) === 4.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=4 actual=%d", get_next_states(1)(1))
assert(get_next_states(1)(2) === 3.U(plru.nBits.W), s"get_next_state state=1 way=2: expected=3 actual=%d", get_next_states(1)(2))
assert(get_next_states(1)(3) === 1.U(plru.nBits.W), s"get_next_state state=1 way=3: expected=1 actual=%d", get_next_states(1)(3))
assert(get_next_states(2)(0) === 7.U(plru.nBits.W), s"get_next_state state=2 way=0: expected=7 actual=%d", get_next_states(2)(0))
assert(get_next_states(2)(1) === 6.U(plru.nBits.W), s"get_next_state state=2 way=1: expected=6 actual=%d", get_next_states(2)(1))
assert(get_next_states(2)(2) === 2.U(plru.nBits.W), s"get_next_state state=2 way=2: expected=2 actual=%d", get_next_states(2)(2))
assert(get_next_states(2)(3) === 0.U(plru.nBits.W), s"get_next_state state=2 way=3: expected=0 actual=%d", get_next_states(2)(3))
assert(get_next_states(3)(0) === 7.U(plru.nBits.W), s"get_next_state state=3 way=0: expected=7 actual=%d", get_next_states(3)(0))
assert(get_next_states(3)(1) === 6.U(plru.nBits.W), s"get_next_state state=3 way=1: expected=6 actual=%d", get_next_states(3)(1))
assert(get_next_states(3)(2) === 3.U(plru.nBits.W), s"get_next_state state=3 way=2: expected=3 actual=%d", get_next_states(3)(2))
assert(get_next_states(3)(3) === 1.U(plru.nBits.W), s"get_next_state state=3 way=3: expected=1 actual=%d", get_next_states(3)(3))
assert(get_next_states(4)(0) === 5.U(plru.nBits.W), s"get_next_state state=4 way=0: expected=5 actual=%d", get_next_states(4)(0))
assert(get_next_states(4)(1) === 4.U(plru.nBits.W), s"get_next_state state=4 way=1: expected=4 actual=%d", get_next_states(4)(1))
assert(get_next_states(4)(2) === 2.U(plru.nBits.W), s"get_next_state state=4 way=2: expected=2 actual=%d", get_next_states(4)(2))
assert(get_next_states(4)(3) === 0.U(plru.nBits.W), s"get_next_state state=4 way=3: expected=0 actual=%d", get_next_states(4)(3))
assert(get_next_states(5)(0) === 5.U(plru.nBits.W), s"get_next_state state=5 way=0: expected=5 actual=%d", get_next_states(5)(0))
assert(get_next_states(5)(1) === 4.U(plru.nBits.W), s"get_next_state state=5 way=1: expected=4 actual=%d", get_next_states(5)(1))
assert(get_next_states(5)(2) === 3.U(plru.nBits.W), s"get_next_state state=5 way=2: expected=3 actual=%d", get_next_states(5)(2))
assert(get_next_states(5)(3) === 1.U(plru.nBits.W), s"get_next_state state=5 way=3: expected=1 actual=%d", get_next_states(5)(3))
assert(get_next_states(6)(0) === 7.U(plru.nBits.W), s"get_next_state state=6 way=0: expected=7 actual=%d", get_next_states(6)(0))
assert(get_next_states(6)(1) === 6.U(plru.nBits.W), s"get_next_state state=6 way=1: expected=6 actual=%d", get_next_states(6)(1))
assert(get_next_states(6)(2) === 2.U(plru.nBits.W), s"get_next_state state=6 way=2: expected=2 actual=%d", get_next_states(6)(2))
assert(get_next_states(6)(3) === 0.U(plru.nBits.W), s"get_next_state state=6 way=3: expected=0 actual=%d", get_next_states(6)(3))
assert(get_next_states(7)(0) === 7.U(plru.nBits.W), s"get_next_state state=7 way=0: expected=7 actual=%d", get_next_states(7)(0))
assert(get_next_states(7)(1) === 6.U(plru.nBits.W), s"get_next_state state=7 way=1: expected=6 actual=%d", get_next_states(7)(1))
assert(get_next_states(7)(2) === 3.U(plru.nBits.W), s"get_next_state state=7 way=2: expected=3 actual=%d", get_next_states(7)(2))
assert(get_next_states(7)(3) === 1.U(plru.nBits.W), s"get_next_state state=7 way=3: expected=1 actual=%d", get_next_states(7)(3))
}
case 5 => {
assert(get_replace_ways( 0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=00: expected=0 actual=%d", get_replace_ways( 0))
assert(get_replace_ways( 1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=01: expected=1 actual=%d", get_replace_ways( 1))
assert(get_replace_ways( 2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=02: expected=0 actual=%d", get_replace_ways( 2))
assert(get_replace_ways( 3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=03: expected=1 actual=%d", get_replace_ways( 3))
assert(get_replace_ways( 4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=04: expected=2 actual=%d", get_replace_ways( 4))
assert(get_replace_ways( 5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=05: expected=2 actual=%d", get_replace_ways( 5))
assert(get_replace_ways( 6) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=06: expected=3 actual=%d", get_replace_ways( 6))
assert(get_replace_ways( 7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=07: expected=3 actual=%d", get_replace_ways( 7))
assert(get_replace_ways( 8) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=08: expected=4 actual=%d", get_replace_ways( 8))
assert(get_replace_ways( 9) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=09: expected=4 actual=%d", get_replace_ways( 9))
assert(get_replace_ways(10) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=10: expected=4 actual=%d", get_replace_ways(10))
assert(get_replace_ways(11) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=11: expected=4 actual=%d", get_replace_ways(11))
assert(get_replace_ways(12) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=12: expected=4 actual=%d", get_replace_ways(12))
assert(get_replace_ways(13) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=13: expected=4 actual=%d", get_replace_ways(13))
assert(get_replace_ways(14) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=14: expected=4 actual=%d", get_replace_ways(14))
assert(get_replace_ways(15) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=15: expected=4 actual=%d", get_replace_ways(15))
assert(get_next_states( 0)(0) === 13.U(plru.nBits.W), s"get_next_state state=00 way=0: expected=13 actual=%d", get_next_states( 0)(0))
assert(get_next_states( 0)(1) === 12.U(plru.nBits.W), s"get_next_state state=00 way=1: expected=12 actual=%d", get_next_states( 0)(1))
assert(get_next_states( 0)(2) === 10.U(plru.nBits.W), s"get_next_state state=00 way=2: expected=10 actual=%d", get_next_states( 0)(2))
assert(get_next_states( 0)(3) === 8.U(plru.nBits.W), s"get_next_state state=00 way=3: expected=08 actual=%d", get_next_states( 0)(3))
assert(get_next_states( 0)(4) === 0.U(plru.nBits.W), s"get_next_state state=00 way=4: expected=00 actual=%d", get_next_states( 0)(4))
assert(get_next_states( 1)(0) === 13.U(plru.nBits.W), s"get_next_state state=01 way=0: expected=13 actual=%d", get_next_states( 1)(0))
assert(get_next_states( 1)(1) === 12.U(plru.nBits.W), s"get_next_state state=01 way=1: expected=12 actual=%d", get_next_states( 1)(1))
assert(get_next_states( 1)(2) === 11.U(plru.nBits.W), s"get_next_state state=01 way=2: expected=11 actual=%d", get_next_states( 1)(2))
assert(get_next_states( 1)(3) === 9.U(plru.nBits.W), s"get_next_state state=01 way=3: expected=09 actual=%d", get_next_states( 1)(3))
assert(get_next_states( 1)(4) === 1.U(plru.nBits.W), s"get_next_state state=01 way=4: expected=01 actual=%d", get_next_states( 1)(4))
assert(get_next_states( 2)(0) === 15.U(plru.nBits.W), s"get_next_state state=02 way=0: expected=15 actual=%d", get_next_states( 2)(0))
assert(get_next_states( 2)(1) === 14.U(plru.nBits.W), s"get_next_state state=02 way=1: expected=14 actual=%d", get_next_states( 2)(1))
assert(get_next_states( 2)(2) === 10.U(plru.nBits.W), s"get_next_state state=02 way=2: expected=10 actual=%d", get_next_states( 2)(2))
assert(get_next_states( 2)(3) === 8.U(plru.nBits.W), s"get_next_state state=02 way=3: expected=08 actual=%d", get_next_states( 2)(3))
assert(get_next_states( 2)(4) === 2.U(plru.nBits.W), s"get_next_state state=02 way=4: expected=02 actual=%d", get_next_states( 2)(4))
assert(get_next_states( 3)(0) === 15.U(plru.nBits.W), s"get_next_state state=03 way=0: expected=15 actual=%d", get_next_states( 3)(0))
assert(get_next_states( 3)(1) === 14.U(plru.nBits.W), s"get_next_state state=03 way=1: expected=14 actual=%d", get_next_states( 3)(1))
assert(get_next_states( 3)(2) === 11.U(plru.nBits.W), s"get_next_state state=03 way=2: expected=11 actual=%d", get_next_states( 3)(2))
assert(get_next_states( 3)(3) === 9.U(plru.nBits.W), s"get_next_state state=03 way=3: expected=09 actual=%d", get_next_states( 3)(3))
assert(get_next_states( 3)(4) === 3.U(plru.nBits.W), s"get_next_state state=03 way=4: expected=03 actual=%d", get_next_states( 3)(4))
assert(get_next_states( 4)(0) === 13.U(plru.nBits.W), s"get_next_state state=04 way=0: expected=13 actual=%d", get_next_states( 4)(0))
assert(get_next_states( 4)(1) === 12.U(plru.nBits.W), s"get_next_state state=04 way=1: expected=12 actual=%d", get_next_states( 4)(1))
assert(get_next_states( 4)(2) === 10.U(plru.nBits.W), s"get_next_state state=04 way=2: expected=10 actual=%d", get_next_states( 4)(2))
assert(get_next_states( 4)(3) === 8.U(plru.nBits.W), s"get_next_state state=04 way=3: expected=08 actual=%d", get_next_states( 4)(3))
assert(get_next_states( 4)(4) === 4.U(plru.nBits.W), s"get_next_state state=04 way=4: expected=04 actual=%d", get_next_states( 4)(4))
assert(get_next_states( 5)(0) === 13.U(plru.nBits.W), s"get_next_state state=05 way=0: expected=13 actual=%d", get_next_states( 5)(0))
assert(get_next_states( 5)(1) === 12.U(plru.nBits.W), s"get_next_state state=05 way=1: expected=12 actual=%d", get_next_states( 5)(1))
assert(get_next_states( 5)(2) === 11.U(plru.nBits.W), s"get_next_state state=05 way=2: expected=11 actual=%d", get_next_states( 5)(2))
assert(get_next_states( 5)(3) === 9.U(plru.nBits.W), s"get_next_state state=05 way=3: expected=09 actual=%d", get_next_states( 5)(3))
assert(get_next_states( 5)(4) === 5.U(plru.nBits.W), s"get_next_state state=05 way=4: expected=05 actual=%d", get_next_states( 5)(4))
assert(get_next_states( 6)(0) === 15.U(plru.nBits.W), s"get_next_state state=06 way=0: expected=15 actual=%d", get_next_states( 6)(0))
assert(get_next_states( 6)(1) === 14.U(plru.nBits.W), s"get_next_state state=06 way=1: expected=14 actual=%d", get_next_states( 6)(1))
assert(get_next_states( 6)(2) === 10.U(plru.nBits.W), s"get_next_state state=06 way=2: expected=10 actual=%d", get_next_states( 6)(2))
assert(get_next_states( 6)(3) === 8.U(plru.nBits.W), s"get_next_state state=06 way=3: expected=08 actual=%d", get_next_states( 6)(3))
assert(get_next_states( 6)(4) === 6.U(plru.nBits.W), s"get_next_state state=06 way=4: expected=06 actual=%d", get_next_states( 6)(4))
assert(get_next_states( 7)(0) === 15.U(plru.nBits.W), s"get_next_state state=07 way=0: expected=15 actual=%d", get_next_states( 7)(0))
assert(get_next_states( 7)(1) === 14.U(plru.nBits.W), s"get_next_state state=07 way=1: expected=14 actual=%d", get_next_states( 7)(1))
assert(get_next_states( 7)(2) === 11.U(plru.nBits.W), s"get_next_state state=07 way=2: expected=11 actual=%d", get_next_states( 7)(2))
assert(get_next_states( 7)(3) === 9.U(plru.nBits.W), s"get_next_state state=07 way=3: expected=09 actual=%d", get_next_states( 7)(3))
assert(get_next_states( 7)(4) === 7.U(plru.nBits.W), s"get_next_state state=07 way=4: expected=07 actual=%d", get_next_states( 7)(4))
assert(get_next_states( 8)(0) === 13.U(plru.nBits.W), s"get_next_state state=08 way=0: expected=13 actual=%d", get_next_states( 8)(0))
assert(get_next_states( 8)(1) === 12.U(plru.nBits.W), s"get_next_state state=08 way=1: expected=12 actual=%d", get_next_states( 8)(1))
assert(get_next_states( 8)(2) === 10.U(plru.nBits.W), s"get_next_state state=08 way=2: expected=10 actual=%d", get_next_states( 8)(2))
assert(get_next_states( 8)(3) === 8.U(plru.nBits.W), s"get_next_state state=08 way=3: expected=08 actual=%d", get_next_states( 8)(3))
assert(get_next_states( 8)(4) === 0.U(plru.nBits.W), s"get_next_state state=08 way=4: expected=00 actual=%d", get_next_states( 8)(4))
assert(get_next_states( 9)(0) === 13.U(plru.nBits.W), s"get_next_state state=09 way=0: expected=13 actual=%d", get_next_states( 9)(0))
assert(get_next_states( 9)(1) === 12.U(plru.nBits.W), s"get_next_state state=09 way=1: expected=12 actual=%d", get_next_states( 9)(1))
assert(get_next_states( 9)(2) === 11.U(plru.nBits.W), s"get_next_state state=09 way=2: expected=11 actual=%d", get_next_states( 9)(2))
assert(get_next_states( 9)(3) === 9.U(plru.nBits.W), s"get_next_state state=09 way=3: expected=09 actual=%d", get_next_states( 9)(3))
assert(get_next_states( 9)(4) === 1.U(plru.nBits.W), s"get_next_state state=09 way=4: expected=01 actual=%d", get_next_states( 9)(4))
assert(get_next_states(10)(0) === 15.U(plru.nBits.W), s"get_next_state state=10 way=0: expected=15 actual=%d", get_next_states(10)(0))
assert(get_next_states(10)(1) === 14.U(plru.nBits.W), s"get_next_state state=10 way=1: expected=14 actual=%d", get_next_states(10)(1))
assert(get_next_states(10)(2) === 10.U(plru.nBits.W), s"get_next_state state=10 way=2: expected=10 actual=%d", get_next_states(10)(2))
assert(get_next_states(10)(3) === 8.U(plru.nBits.W), s"get_next_state state=10 way=3: expected=08 actual=%d", get_next_states(10)(3))
assert(get_next_states(10)(4) === 2.U(plru.nBits.W), s"get_next_state state=10 way=4: expected=02 actual=%d", get_next_states(10)(4))
assert(get_next_states(11)(0) === 15.U(plru.nBits.W), s"get_next_state state=11 way=0: expected=15 actual=%d", get_next_states(11)(0))
assert(get_next_states(11)(1) === 14.U(plru.nBits.W), s"get_next_state state=11 way=1: expected=14 actual=%d", get_next_states(11)(1))
assert(get_next_states(11)(2) === 11.U(plru.nBits.W), s"get_next_state state=11 way=2: expected=11 actual=%d", get_next_states(11)(2))
assert(get_next_states(11)(3) === 9.U(plru.nBits.W), s"get_next_state state=11 way=3: expected=09 actual=%d", get_next_states(11)(3))
assert(get_next_states(11)(4) === 3.U(plru.nBits.W), s"get_next_state state=11 way=4: expected=03 actual=%d", get_next_states(11)(4))
assert(get_next_states(12)(0) === 13.U(plru.nBits.W), s"get_next_state state=12 way=0: expected=13 actual=%d", get_next_states(12)(0))
assert(get_next_states(12)(1) === 12.U(plru.nBits.W), s"get_next_state state=12 way=1: expected=12 actual=%d", get_next_states(12)(1))
assert(get_next_states(12)(2) === 10.U(plru.nBits.W), s"get_next_state state=12 way=2: expected=10 actual=%d", get_next_states(12)(2))
assert(get_next_states(12)(3) === 8.U(plru.nBits.W), s"get_next_state state=12 way=3: expected=08 actual=%d", get_next_states(12)(3))
assert(get_next_states(12)(4) === 4.U(plru.nBits.W), s"get_next_state state=12 way=4: expected=04 actual=%d", get_next_states(12)(4))
assert(get_next_states(13)(0) === 13.U(plru.nBits.W), s"get_next_state state=13 way=0: expected=13 actual=%d", get_next_states(13)(0))
assert(get_next_states(13)(1) === 12.U(plru.nBits.W), s"get_next_state state=13 way=1: expected=12 actual=%d", get_next_states(13)(1))
assert(get_next_states(13)(2) === 11.U(plru.nBits.W), s"get_next_state state=13 way=2: expected=11 actual=%d", get_next_states(13)(2))
assert(get_next_states(13)(3) === 9.U(plru.nBits.W), s"get_next_state state=13 way=3: expected=09 actual=%d", get_next_states(13)(3))
assert(get_next_states(13)(4) === 5.U(plru.nBits.W), s"get_next_state state=13 way=4: expected=05 actual=%d", get_next_states(13)(4))
assert(get_next_states(14)(0) === 15.U(plru.nBits.W), s"get_next_state state=14 way=0: expected=15 actual=%d", get_next_states(14)(0))
assert(get_next_states(14)(1) === 14.U(plru.nBits.W), s"get_next_state state=14 way=1: expected=14 actual=%d", get_next_states(14)(1))
assert(get_next_states(14)(2) === 10.U(plru.nBits.W), s"get_next_state state=14 way=2: expected=10 actual=%d", get_next_states(14)(2))
assert(get_next_states(14)(3) === 8.U(plru.nBits.W), s"get_next_state state=14 way=3: expected=08 actual=%d", get_next_states(14)(3))
assert(get_next_states(14)(4) === 6.U(plru.nBits.W), s"get_next_state state=14 way=4: expected=06 actual=%d", get_next_states(14)(4))
assert(get_next_states(15)(0) === 15.U(plru.nBits.W), s"get_next_state state=15 way=0: expected=15 actual=%d", get_next_states(15)(0))
assert(get_next_states(15)(1) === 14.U(plru.nBits.W), s"get_next_state state=15 way=1: expected=14 actual=%d", get_next_states(15)(1))
assert(get_next_states(15)(2) === 11.U(plru.nBits.W), s"get_next_state state=15 way=2: expected=11 actual=%d", get_next_states(15)(2))
assert(get_next_states(15)(3) === 9.U(plru.nBits.W), s"get_next_state state=15 way=3: expected=09 actual=%d", get_next_states(15)(3))
assert(get_next_states(15)(4) === 7.U(plru.nBits.W), s"get_next_state state=15 way=4: expected=07 actual=%d", get_next_states(15)(4))
}
case 6 => {
assert(get_replace_ways( 0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=00: expected=0 actual=%d", get_replace_ways( 0))
assert(get_replace_ways( 1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=01: expected=1 actual=%d", get_replace_ways( 1))
assert(get_replace_ways( 2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=02: expected=0 actual=%d", get_replace_ways( 2))
assert(get_replace_ways( 3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=03: expected=1 actual=%d", get_replace_ways( 3))
assert(get_replace_ways( 4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=04: expected=2 actual=%d", get_replace_ways( 4))
assert(get_replace_ways( 5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=05: expected=2 actual=%d", get_replace_ways( 5))
assert(get_replace_ways( 6) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=06: expected=3 actual=%d", get_replace_ways( 6))
assert(get_replace_ways( 7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=07: expected=3 actual=%d", get_replace_ways( 7))
assert(get_replace_ways( 8) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=08: expected=0 actual=%d", get_replace_ways( 8))
assert(get_replace_ways( 9) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=09: expected=1 actual=%d", get_replace_ways( 9))
assert(get_replace_ways(10) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=10: expected=0 actual=%d", get_replace_ways(10))
assert(get_replace_ways(11) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=11: expected=1 actual=%d", get_replace_ways(11))
assert(get_replace_ways(12) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=12: expected=2 actual=%d", get_replace_ways(12))
assert(get_replace_ways(13) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=13: expected=2 actual=%d", get_replace_ways(13))
assert(get_replace_ways(14) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=14: expected=3 actual=%d", get_replace_ways(14))
assert(get_replace_ways(15) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=15: expected=3 actual=%d", get_replace_ways(15))
assert(get_replace_ways(16) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=16: expected=4 actual=%d", get_replace_ways(16))
assert(get_replace_ways(17) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=17: expected=4 actual=%d", get_replace_ways(17))
assert(get_replace_ways(18) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=18: expected=4 actual=%d", get_replace_ways(18))
assert(get_replace_ways(19) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=19: expected=4 actual=%d", get_replace_ways(19))
assert(get_replace_ways(20) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=20: expected=4 actual=%d", get_replace_ways(20))
assert(get_replace_ways(21) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=21: expected=4 actual=%d", get_replace_ways(21))
assert(get_replace_ways(22) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=22: expected=4 actual=%d", get_replace_ways(22))
assert(get_replace_ways(23) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=23: expected=4 actual=%d", get_replace_ways(23))
assert(get_replace_ways(24) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=24: expected=5 actual=%d", get_replace_ways(24))
assert(get_replace_ways(25) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=25: expected=5 actual=%d", get_replace_ways(25))
assert(get_replace_ways(26) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=26: expected=5 actual=%d", get_replace_ways(26))
assert(get_replace_ways(27) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=27: expected=5 actual=%d", get_replace_ways(27))
assert(get_replace_ways(28) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=28: expected=5 actual=%d", get_replace_ways(28))
assert(get_replace_ways(29) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=29: expected=5 actual=%d", get_replace_ways(29))
assert(get_replace_ways(30) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=30: expected=5 actual=%d", get_replace_ways(30))
assert(get_replace_ways(31) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=31: expected=5 actual=%d", get_replace_ways(31))
}
case _ => throw new IllegalArgumentException(s"no test pattern found for n_ways=$n_ways")
}
}
File HellaCache.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.rocket
import chisel3.{dontTouch, _}
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.bundlebridge._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.amba.AMBAProtField
import freechips.rocketchip.diplomacy.{IdRange, TransferSizes, RegionType}
import freechips.rocketchip.tile.{L1CacheParams, HasL1CacheParameters, HasCoreParameters, CoreBundle, HasNonDiplomaticTileParameters, BaseTile, HasTileParameters}
import freechips.rocketchip.tilelink.{TLMasterParameters, TLClientNode, TLMasterPortParameters, TLEdgeOut, TLWidthWidget, TLFIFOFixer, ClientMetadata}
import freechips.rocketchip.util.{Code, RandomReplacement, ParameterizedBundle}
import freechips.rocketchip.util.{BooleanToAugmentedBoolean, IntToAugmentedInt}
import scala.collection.mutable.ListBuffer
case class DCacheParams(
nSets: Int = 64,
nWays: Int = 4,
rowBits: Int = 64,
subWordBits: Option[Int] = None,
replacementPolicy: String = "random",
nTLBSets: Int = 1,
nTLBWays: Int = 32,
nTLBBasePageSectors: Int = 4,
nTLBSuperpages: Int = 4,
tagECC: Option[String] = None,
dataECC: Option[String] = None,
dataECCBytes: Int = 1,
nMSHRs: Int = 1,
nSDQ: Int = 17,
nRPQ: Int = 16,
nMMIOs: Int = 1,
blockBytes: Int = 64,
separateUncachedResp: Boolean = false,
acquireBeforeRelease: Boolean = false,
pipelineWayMux: Boolean = false,
clockGate: Boolean = false,
scratch: Option[BigInt] = None) extends L1CacheParams {
def tagCode: Code = Code.fromString(tagECC)
def dataCode: Code = Code.fromString(dataECC)
def dataScratchpadBytes: Int = scratch.map(_ => nSets*blockBytes).getOrElse(0)
def replacement = new RandomReplacement(nWays)
def silentDrop: Boolean = !acquireBeforeRelease
require((!scratch.isDefined || nWays == 1),
"Scratchpad only allowed in direct-mapped cache.")
require((!scratch.isDefined || nMSHRs == 0),
"Scratchpad only allowed in blocking cache.")
if (scratch.isEmpty)
require(isPow2(nSets), s"nSets($nSets) must be pow2")
}
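// Example configuration (illustrative only; `ExampleDCacheConfigs` is not part of the
// original file): a 16 KiB, 4-way, 64-set data cache with a single MSHR — 64 sets *
// 4 ways * 64-byte blocks — leaving every unlisted field at the defaults declared above.
object ExampleDCacheConfigs {
  val small = DCacheParams(nSets = 64, nWays = 4, rowBits = 64, nMSHRs = 1, blockBytes = 64)
}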
trait HasL1HellaCacheParameters extends HasL1CacheParameters with HasCoreParameters {
val cacheParams = tileParams.dcache.get
val cfg = cacheParams
def wordBits = coreDataBits
def wordBytes = coreDataBytes
def subWordBits = cacheParams.subWordBits.getOrElse(wordBits)
def subWordBytes = subWordBits / 8
def wordOffBits = log2Up(wordBytes)
def beatBytes = cacheBlockBytes / cacheDataBeats
def beatWords = beatBytes / wordBytes
def beatOffBits = log2Up(beatBytes)
def idxMSB = untagBits-1
def idxLSB = blockOffBits
def offsetmsb = idxLSB-1
def offsetlsb = wordOffBits
def rowWords = rowBits/wordBits
def doNarrowRead = coreDataBits * nWays % rowBits == 0
def eccBytes = cacheParams.dataECCBytes
val eccBits = cacheParams.dataECCBytes * 8
val encBits = cacheParams.dataCode.width(eccBits)
val encWordBits = encBits * (wordBits / eccBits)
def encDataBits = cacheParams.dataCode.width(coreDataBits) // NBDCache only
def encRowBits = encDataBits*rowWords
def lrscCycles = coreParams.lrscCycles // ISA requires 16-insn LRSC sequences to succeed
def lrscBackoff = 3 // disallow LRSC reacquisition briefly
def blockProbeAfterGrantCycles = 8 // give the processor some time to issue a request after a grant
def nIOMSHRs = cacheParams.nMMIOs
def maxUncachedInFlight = cacheParams.nMMIOs
def dataScratchpadSize = cacheParams.dataScratchpadBytes
require(rowBits >= coreDataBits, s"rowBits($rowBits) < coreDataBits($coreDataBits)")
if (!usingDataScratchpad)
require(rowBits == cacheDataBits, s"rowBits($rowBits) != cacheDataBits($cacheDataBits)")
// would need offset addr for puts if data width < xlen
require(xLen <= cacheDataBits, s"xLen($xLen) > cacheDataBits($cacheDataBits)")
}
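// Worked sizing example (informal, using only the definitions above; the concrete
// numbers are an assumed configuration, not a requirement): with coreDataBits = 64 and
// rowBits = 64, wordBytes = 8, wordOffBits = 3, and rowWords = 64/64 = 1; with 4 ways,
// doNarrowRead = (64 * 4 % 64 == 0) = true. With no data ECC (dataECCBytes = 1 and the
// identity code), eccBits = 8, encBits = 8, and encWordBits = 8 * (64/8) = 64, i.e. no
// storage overhead per word.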
abstract class L1HellaCacheModule(implicit val p: Parameters) extends Module
with HasL1HellaCacheParameters
abstract class L1HellaCacheBundle(implicit val p: Parameters) extends ParameterizedBundle()(p)
with HasL1HellaCacheParameters
/** Bundle definitions for HellaCache interfaces */
trait HasCoreMemOp extends HasL1HellaCacheParameters {
val addr = UInt(coreMaxAddrBits.W)
val idx = (usingVM && untagBits > pgIdxBits).option(UInt(coreMaxAddrBits.W))
val tag = UInt((coreParams.dcacheReqTagBits + log2Ceil(dcacheArbPorts)).W)
val cmd = UInt(M_SZ.W)
val size = UInt(log2Ceil(coreDataBytes.log2 + 1).W)
val signed = Bool()
val dprv = UInt(PRV.SZ.W)
val dv = Bool()
}
trait HasCoreData extends HasCoreParameters {
val data = UInt(coreDataBits.W)
val mask = UInt(coreDataBytes.W)
}
class HellaCacheReqInternal(implicit p: Parameters) extends CoreBundle()(p) with HasCoreMemOp {
val phys = Bool()
val no_resp = Bool() // The dcache may omit generating a response for this request
val no_alloc = Bool()
val no_xcpt = Bool()
}
class HellaCacheReq(implicit p: Parameters) extends HellaCacheReqInternal()(p) with HasCoreData
class HellaCacheResp(implicit p: Parameters) extends CoreBundle()(p)
with HasCoreMemOp
with HasCoreData {
val replay = Bool()
val has_data = Bool()
val data_word_bypass = UInt(coreDataBits.W)
val data_raw = UInt(coreDataBits.W)
val store_data = UInt(coreDataBits.W)
}
class AlignmentExceptions extends Bundle {
val ld = Bool()
val st = Bool()
}
class HellaCacheExceptions extends Bundle {
val ma = new AlignmentExceptions
val pf = new AlignmentExceptions
val gf = new AlignmentExceptions
val ae = new AlignmentExceptions
}
class HellaCacheWriteData(implicit p: Parameters) extends CoreBundle()(p) with HasCoreData
class HellaCachePerfEvents extends Bundle {
val acquire = Bool()
val release = Bool()
val grant = Bool()
val tlbMiss = Bool()
val blocked = Bool()
val canAcceptStoreThenLoad = Bool()
val canAcceptStoreThenRMW = Bool()
val canAcceptLoadThenLoad = Bool()
val storeBufferEmptyAfterLoad = Bool()
val storeBufferEmptyAfterStore = Bool()
}
// interface between D$ and processor/DTLB
class HellaCacheIO(implicit p: Parameters) extends CoreBundle()(p) {
val req = Decoupled(new HellaCacheReq)
val s1_kill = Output(Bool()) // kill previous cycle's req
val s1_data = Output(new HellaCacheWriteData()) // data for previous cycle's req
val s2_nack = Input(Bool()) // req from two cycles ago is rejected
val s2_nack_cause_raw = Input(Bool()) // reason for nack is store-load RAW hazard (performance hint)
val s2_kill = Output(Bool()) // kill req from two cycles ago
val s2_uncached = Input(Bool()) // advisory signal that the access is MMIO
val s2_paddr = Input(UInt(paddrBits.W)) // translated address
val resp = Flipped(Valid(new HellaCacheResp))
val replay_next = Input(Bool())
val s2_xcpt = Input(new HellaCacheExceptions)
val s2_gpa = Input(UInt(vaddrBitsExtended.W))
val s2_gpa_is_pte = Input(Bool())
val uncached_resp = tileParams.dcache.get.separateUncachedResp.option(Flipped(Decoupled(new HellaCacheResp)))
val ordered = Input(Bool())
val store_pending = Input(Bool()) // there is a store in a store buffer somewhere
val perf = Input(new HellaCachePerfEvents())
val keep_clock_enabled = Output(Bool()) // should D$ avoid clock-gating itself?
val clock_enabled = Input(Bool()) // is D$ currently being clocked?
}
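// Pipeline-timing sketch for the interface above (informal, derived from the field
// comments; exact load-response timing depends on the concrete cache implementation):
//   s0: drive io.req.valid/bits; the request is accepted when io.req.ready is high
//   s1: io.s1_kill may cancel the s0 request; stores supply io.s1_data this cycle
//   s2: io.s2_nack reports rejection (the core must replay); io.s2_kill cancels the
//       s0 request; io.resp / io.uncached_resp carry the data when it becomes available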
/** Base classes for Diplomatic TL2 HellaCaches */
abstract class HellaCache(tileId: Int)(implicit p: Parameters) extends LazyModule
with HasNonDiplomaticTileParameters {
protected val cfg = tileParams.dcache.get
protected def cacheClientParameters = cfg.scratch.map(x => Seq()).getOrElse(Seq(TLMasterParameters.v1(
name = s"Core ${tileId} DCache",
sourceId = IdRange(0, 1 max cfg.nMSHRs),
supportsProbe = TransferSizes(cfg.blockBytes, cfg.blockBytes))))
protected def mmioClientParameters = Seq(TLMasterParameters.v1(
name = s"Core ${tileId} DCache MMIO",
sourceId = IdRange(firstMMIO, firstMMIO + cfg.nMMIOs),
requestFifo = true))
def firstMMIO = (cacheClientParameters.map(_.sourceId.end) :+ 0).max
val node = TLClientNode(Seq(TLMasterPortParameters.v1(
clients = cacheClientParameters ++ mmioClientParameters,
minLatency = 1,
requestFields = tileParams.core.useVM.option(Seq()).getOrElse(Seq(AMBAProtField())))))
val hartIdSinkNodeOpt = cfg.scratch.map(_ => BundleBridgeSink[UInt]())
val mmioAddressPrefixSinkNodeOpt = cfg.scratch.map(_ => BundleBridgeSink[UInt]())
val module: HellaCacheModule
def flushOnFenceI = cfg.scratch.isEmpty && !node.edges.out(0).manager.managers.forall(m => !m.supportsAcquireB || !m.executable || m.regionType >= RegionType.TRACKED || m.regionType <= RegionType.IDEMPOTENT)
def canSupportCFlushLine = !usingVM || cfg.blockBytes * cfg.nSets <= (1 << pgIdxBits)
require(!tileParams.core.haveCFlush || cfg.scratch.isEmpty, "CFLUSH_D_L1 instruction requires a D$")
}
class HellaCacheBundle(implicit p: Parameters) extends CoreBundle()(p) {
val cpu = Flipped(new HellaCacheIO)
val ptw = new TLBPTWIO()
val errors = new DCacheErrors
val tlb_port = new DCacheTLBPort
}
class HellaCacheModule(outer: HellaCache) extends LazyModuleImp(outer)
with HasL1HellaCacheParameters {
implicit val edge: TLEdgeOut = outer.node.edges.out(0)
val (tl_out, _) = outer.node.out(0)
val io = IO(new HellaCacheBundle)
val io_hartid = outer.hartIdSinkNodeOpt.map(_.bundle)
val io_mmio_address_prefix = outer.mmioAddressPrefixSinkNodeOpt.map(_.bundle)
dontTouch(io.cpu.resp) // Users like to monitor these fields even if the core ignores some signals
dontTouch(io.cpu.s1_data)
require(rowBits == edge.bundle.dataBits)
private val fifoManagers = edge.manager.managers.filter(TLFIFOFixer.allVolatile)
fifoManagers.foreach { m =>
require (m.fifoId == fifoManagers.head.fifoId,
s"IOMSHRs must be FIFO for all regions with effects, but HellaCache sees\n"+
s"${m.nodePath.map(_.name)}\nversus\n${fifoManagers.head.nodePath.map(_.name)}")
}
}
/** Support overriding which HellaCache is instantiated */
case object BuildHellaCache extends Field[BaseTile => Parameters => HellaCache](HellaCacheFactory.apply)
object HellaCacheFactory {
def apply(tile: BaseTile)(p: Parameters): HellaCache = {
if (tile.tileParams.dcache.get.nMSHRs == 0)
new DCache(tile.tileId, tile.crossing)(p)
else
new NonBlockingDCache(tile.tileId)(p)
}
}
/** Mix-ins for constructing tiles that have a HellaCache */
trait HasHellaCache { this: BaseTile =>
val module: HasHellaCacheModule
implicit val p: Parameters
var nDCachePorts = 0
lazy val dcache: HellaCache = LazyModule(p(BuildHellaCache)(this)(p))
tlMasterXbar.node := TLWidthWidget(tileParams.dcache.get.rowBits/8) := dcache.node
dcache.hartIdSinkNodeOpt.map { _ := hartIdNexusNode }
dcache.mmioAddressPrefixSinkNodeOpt.map { _ := mmioAddressPrefixNexusNode }
InModuleBody {
dcache.module.io.tlb_port := DontCare
}
}
trait HasHellaCacheModule {
val outer: HasHellaCache with HasTileParameters
implicit val p: Parameters
val dcachePorts = ListBuffer[HellaCacheIO]()
val dcacheArb = Module(new HellaCacheArbiter(outer.nDCachePorts)(outer.p))
outer.dcache.module.io.cpu <> dcacheArb.io.mem
}
/** Metadata array used for all HellaCaches */
class L1Metadata(implicit p: Parameters) extends L1HellaCacheBundle()(p) {
val coh = new ClientMetadata
val tag = UInt(tagBits.W)
}
object L1Metadata {
def apply(tag: Bits, coh: ClientMetadata)(implicit p: Parameters) = {
val meta = Wire(new L1Metadata)
meta.tag := tag
meta.coh := coh
meta
}
}
class L1MetaReadReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) {
val idx = UInt(idxBits.W)
val way_en = UInt(nWays.W)
val tag = UInt(tagBits.W)
}
class L1MetaWriteReq(implicit p: Parameters) extends L1MetaReadReq()(p) {
val data = new L1Metadata
}
class L1MetadataArray[T <: L1Metadata](onReset: () => T)(implicit p: Parameters) extends L1HellaCacheModule()(p) {
val rstVal = onReset()
val io = IO(new Bundle {
val read = Flipped(Decoupled(new L1MetaReadReq))
val write = Flipped(Decoupled(new L1MetaWriteReq))
val resp = Output(Vec(nWays, rstVal.cloneType))
})
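  // On reset, rst_cnt sweeps through every set and writes rstVal to all ways, so the
  // metadata starts in a known coherence state; reads are held off (io.read.ready low)
  // whenever a write — including this reset sweep — is using the single SRAM port.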
val rst_cnt = RegInit(0.U(log2Up(nSets+1).W))
val rst = rst_cnt < nSets.U
val waddr = Mux(rst, rst_cnt, io.write.bits.idx)
val wdata = Mux(rst, rstVal, io.write.bits.data).asUInt
val wmask = Mux(rst || (nWays == 1).B, (-1).S, io.write.bits.way_en.asSInt).asBools
val rmask = Mux(rst || (nWays == 1).B, (-1).S, io.read.bits.way_en.asSInt).asBools
when (rst) { rst_cnt := rst_cnt+1.U }
val metabits = rstVal.getWidth
val tag_array = SyncReadMem(nSets, Vec(nWays, UInt(metabits.W)))
val wen = rst || io.write.valid
when (wen) {
tag_array.write(waddr, VecInit.fill(nWays)(wdata), wmask)
}
io.resp := tag_array.read(io.read.bits.idx, io.read.fire).map(_.asTypeOf(chiselTypeOf(rstVal)))
io.read.ready := !wen // so really this could be a 6T RAM
io.write.ready := !rst
}
File Consts.scala:
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.rocket.constants
import chisel3._
import chisel3.util._
import freechips.rocketchip.util._
trait ScalarOpConstants {
val SZ_BR = 3
def BR_X = BitPat("b???")
def BR_EQ = 0.U(3.W)
def BR_NE = 1.U(3.W)
def BR_J = 2.U(3.W)
def BR_N = 3.U(3.W)
def BR_LT = 4.U(3.W)
def BR_GE = 5.U(3.W)
def BR_LTU = 6.U(3.W)
def BR_GEU = 7.U(3.W)
def A1_X = BitPat("b??")
def A1_ZERO = 0.U(2.W)
def A1_RS1 = 1.U(2.W)
def A1_PC = 2.U(2.W)
def A1_RS1SHL = 3.U(2.W)
def IMM_X = BitPat("b???")
def IMM_S = 0.U(3.W)
def IMM_SB = 1.U(3.W)
def IMM_U = 2.U(3.W)
def IMM_UJ = 3.U(3.W)
def IMM_I = 4.U(3.W)
def IMM_Z = 5.U(3.W)
def A2_X = BitPat("b???")
def A2_ZERO = 0.U(3.W)
def A2_SIZE = 1.U(3.W)
def A2_RS2 = 2.U(3.W)
def A2_IMM = 3.U(3.W)
def A2_RS2OH = 4.U(3.W)
def A2_IMMOH = 5.U(3.W)
def X = BitPat("b?")
def N = BitPat("b0")
def Y = BitPat("b1")
val SZ_DW = 1
def DW_X = X
def DW_32 = false.B
def DW_64 = true.B
def DW_XPR = DW_64
}
trait MemoryOpConstants {
val NUM_XA_OPS = 9
val M_SZ = 5
def M_X = BitPat("b?????");
def M_XRD = "b00000".U; // int load
def M_XWR = "b00001".U; // int store
def M_PFR = "b00010".U; // prefetch with intent to read
def M_PFW = "b00011".U; // prefetch with intent to write
def M_XA_SWAP = "b00100".U
def M_FLUSH_ALL = "b00101".U // flush all lines
def M_XLR = "b00110".U
def M_XSC = "b00111".U
def M_XA_ADD = "b01000".U
def M_XA_XOR = "b01001".U
def M_XA_OR = "b01010".U
def M_XA_AND = "b01011".U
def M_XA_MIN = "b01100".U
def M_XA_MAX = "b01101".U
def M_XA_MINU = "b01110".U
def M_XA_MAXU = "b01111".U
def M_FLUSH = "b10000".U // write back dirty data and cede R/W permissions
def M_PWR = "b10001".U // partial (masked) store
def M_PRODUCE = "b10010".U // write back dirty data and cede W permissions
def M_CLEAN = "b10011".U // write back dirty data and retain R/W permissions
def M_SFENCE = "b10100".U // SFENCE.VMA
def M_HFENCEV = "b10101".U // HFENCE.VVMA
def M_HFENCEG = "b10110".U // HFENCE.GVMA
def M_WOK = "b10111".U // check write permissions but don't perform a write
def M_HLVX = "b10000".U // HLVX instruction
def isAMOLogical(cmd: UInt) = cmd.isOneOf(M_XA_SWAP, M_XA_XOR, M_XA_OR, M_XA_AND)
def isAMOArithmetic(cmd: UInt) = cmd.isOneOf(M_XA_ADD, M_XA_MIN, M_XA_MAX, M_XA_MINU, M_XA_MAXU)
def isAMO(cmd: UInt) = isAMOLogical(cmd) || isAMOArithmetic(cmd)
def isPrefetch(cmd: UInt) = cmd === M_PFR || cmd === M_PFW
def isRead(cmd: UInt) = cmd.isOneOf(M_XRD, M_HLVX, M_XLR, M_XSC) || isAMO(cmd)
def isWrite(cmd: UInt) = cmd === M_XWR || cmd === M_PWR || cmd === M_XSC || isAMO(cmd)
def isWriteIntent(cmd: UInt) = isWrite(cmd) || cmd === M_PFW || cmd === M_XLR
}
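// Illustrative decode sketch (assumed example; `ExampleMemCmdDecode` is not part of the
// original file). It combines the predicates above the way a cache pipeline typically
// does: AMOs count as both reads and writes, while prefetch-with-write-intent (M_PFW)
// and LR (M_XLR) only require write *intent*, not an actual store.
object ExampleMemCmdDecode extends MemoryOpConstants {
  def readsData(cmd: UInt): Bool            = isRead(cmd)        // loads, LR/SC, HLVX, AMOs
  def writesData(cmd: UInt): Bool           = isWrite(cmd)       // stores, SC, partial stores, AMOs
  def needsWritePermission(cmd: UInt): Bool = isWriteIntent(cmd) // writes plus M_PFW and M_XLR
}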
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File TLB.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.devices.debug.DebugModuleKey
import freechips.rocketchip.diplomacy.RegionType
import freechips.rocketchip.subsystem.CacheBlockBytes
import freechips.rocketchip.tile.{CoreModule, CoreBundle}
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util.{OptimizationBarrier, SetAssocLRU, PseudoLRU, PopCountAtLeast, property}
import freechips.rocketchip.util.BooleanToAugmentedBoolean
import freechips.rocketchip.util.IntToAugmentedInt
import freechips.rocketchip.util.UIntToAugmentedUInt
import freechips.rocketchip.util.UIntIsOneOf
import freechips.rocketchip.util.SeqToAugmentedSeq
import freechips.rocketchip.util.SeqBoolBitwiseOps
case object ASIdBits extends Field[Int](0)
case object VMIdBits extends Field[Int](0)
/** =SFENCE=
* rs1 rs2
* {{{
* 0 0 -> flush all
* 0 1 -> flush by ASID
* 1 0 -> flush by ADDR (all address spaces)
* 1 1 -> flush by ADDR and ASID
* }}}
* {{{
* If rs1=x0 and rs2=x0, the fence orders all reads and writes made to any level of the page tables, for all address spaces.
* If rs1=x0 and rs2!=x0, the fence orders all reads and writes made to any level of the page tables, but only for the address space identified by integer register rs2. Accesses to global mappings (see Section 4.3.1) are not ordered.
* If rs1!=x0 and rs2=x0, the fence orders only reads and writes made to the leaf page table entry corresponding to the virtual address in rs1, for all address spaces.
* If rs1!=x0 and rs2!=x0, the fence orders only reads and writes made to the leaf page table entry corresponding to the virtual address in rs1, for the address space identified by integer register rs2. Accesses to global mappings are not ordered.
* }}}
*/
class SFenceReq(implicit p: Parameters) extends CoreBundle()(p) {
val rs1 = Bool()
val rs2 = Bool()
val addr = UInt(vaddrBits.W)
val asid = UInt((asIdBits max 1).W) // TODO zero-width
val hv = Bool()
val hg = Bool()
}
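// Decode sketch for the rs1/rs2 table above (illustrative comments only; the wire
// names are assumptions). rs1 means "a specific virtual address was supplied in addr",
// rs2 means "a specific ASID was supplied in asid":
//   val flushAll        = !sfence.bits.rs1 && !sfence.bits.rs2
//   val flushByASID     = !sfence.bits.rs1 &&  sfence.bits.rs2
//   val flushByAddr     =  sfence.bits.rs1 && !sfence.bits.rs2   // all address spaces
//   val flushByAddrASID =  sfence.bits.rs1 &&  sfence.bits.rs2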
class TLBReq(lgMaxSize: Int)(implicit p: Parameters) extends CoreBundle()(p) {
/** request address from CPU. */
val vaddr = UInt(vaddrBitsExtended.W)
/** don't lookup TLB, bypass vaddr as paddr */
val passthrough = Bool()
/** granularity */
val size = UInt(log2Ceil(lgMaxSize + 1).W)
/** memory command. */
val cmd = Bits(M_SZ.W)
val prv = UInt(PRV.SZ.W)
/** virtualization mode */
val v = Bool()
}
class TLBExceptions extends Bundle {
val ld = Bool()
val st = Bool()
val inst = Bool()
}
class TLBResp(lgMaxSize: Int = 3)(implicit p: Parameters) extends CoreBundle()(p) {
// lookup responses
val miss = Bool()
/** physical address */
val paddr = UInt(paddrBits.W)
val gpa = UInt(vaddrBitsExtended.W)
val gpa_is_pte = Bool()
/** page fault exception */
val pf = new TLBExceptions
/** guest page fault exception */
val gf = new TLBExceptions
/** access exception */
val ae = new TLBExceptions
/** misaligned access exception */
val ma = new TLBExceptions
/** if this address is cacheable */
val cacheable = Bool()
/** if caches must allocate this address */
val must_alloc = Bool()
/** if this address is prefetchable for caches*/
val prefetchable = Bool()
/** size/cmd of request that generated this response*/
val size = UInt(log2Ceil(lgMaxSize + 1).W)
val cmd = UInt(M_SZ.W)
}
class TLBEntryData(implicit p: Parameters) extends CoreBundle()(p) {
val ppn = UInt(ppnBits.W)
/** pte.u user */
val u = Bool()
/** pte.g global */
val g = Bool()
/** access exception.
* D$ -> PTW -> TLB AE
* Alignment failed.
*/
val ae_ptw = Bool()
val ae_final = Bool()
val ae_stage2 = Bool()
/** page fault */
val pf = Bool()
/** guest page fault */
val gf = Bool()
/** supervisor write */
val sw = Bool()
/** supervisor execute */
val sx = Bool()
/** supervisor read */
val sr = Bool()
/** hypervisor write */
val hw = Bool()
/** hypervisor execute */
val hx = Bool()
/** hypervisor read */
val hr = Bool()
/** prot_w */
val pw = Bool()
/** prot_x */
val px = Bool()
/** prot_r */
val pr = Bool()
/** PutPartial */
val ppp = Bool()
/** AMO logical */
val pal = Bool()
/** AMO arithmetic */
val paa = Bool()
/** get/put effects */
val eff = Bool()
/** cacheable */
val c = Bool()
/** fragmented_superpage support */
val fragmented_superpage = Bool()
}
/** basic cell for TLB data */
class TLBEntry(val nSectors: Int, val superpage: Boolean, val superpageOnly: Boolean)(implicit p: Parameters) extends CoreBundle()(p) {
require(nSectors == 1 || !superpage)
require(!superpageOnly || superpage)
val level = UInt(log2Ceil(pgLevels).W)
/** use vpn as tag */
val tag_vpn = UInt(vpnBits.W)
/** tag in virtualization mode */
val tag_v = Bool()
/** entry data */
val data = Vec(nSectors, UInt(new TLBEntryData().getWidth.W))
/** valid bit */
val valid = Vec(nSectors, Bool())
/** returns all entry data in this entry */
def entry_data = data.map(_.asTypeOf(new TLBEntryData))
/** returns the index of sector */
private def sectorIdx(vpn: UInt) = vpn.extract(nSectors.log2-1, 0)
/** returns the entry data matched with this vpn*/
def getData(vpn: UInt) = OptimizationBarrier(data(sectorIdx(vpn)).asTypeOf(new TLBEntryData))
/** returns whether a sector hits */
def sectorHit(vpn: UInt, virtual: Bool) = valid.orR && sectorTagMatch(vpn, virtual)
/** returns whether tag matches vpn */
def sectorTagMatch(vpn: UInt, virtual: Bool) = (((tag_vpn ^ vpn) >> nSectors.log2) === 0.U) && (tag_v === virtual)
/** returns hit signal */
def hit(vpn: UInt, virtual: Bool): Bool = {
if (superpage && usingVM) {
var tagMatch = valid.head && (tag_v === virtual)
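      // compare the VPN one page-table level at a time, from the root down; levels
      // below this entry's recorded `level` are covered by the superpage mapping and
      // are ignored, as is the last level when the entry is superpage-only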
for (j <- 0 until pgLevels) {
val base = (pgLevels - 1 - j) * pgLevelBits
val n = pgLevelBits + (if (j == 0) hypervisorExtraAddrBits else 0)
val ignore = level < j.U || (superpageOnly && j == pgLevels - 1).B
tagMatch = tagMatch && (ignore || (tag_vpn ^ vpn)(base + n - 1, base) === 0.U)
}
tagMatch
} else {
val idx = sectorIdx(vpn)
valid(idx) && sectorTagMatch(vpn, virtual)
}
}
/** returns the ppn of the input TLBEntryData */
def ppn(vpn: UInt, data: TLBEntryData) = {
val supervisorVPNBits = pgLevels * pgLevelBits
if (superpage && usingVM) {
var res = data.ppn >> pgLevelBits*(pgLevels - 1)
for (j <- 1 until pgLevels) {
val ignore = level < j.U || (superpageOnly && j == pgLevels - 1).B
res = Cat(res, (Mux(ignore, vpn, 0.U) | data.ppn)(supervisorVPNBits - j*pgLevelBits - 1, supervisorVPNBits - (j + 1)*pgLevelBits))
}
res
} else {
data.ppn
}
}
/** does the refill
*
* find the target entry with vpn tag
* and replace the target entry with the input entry data
*/
def insert(vpn: UInt, virtual: Bool, level: UInt, entry: TLBEntryData): Unit = {
this.tag_vpn := vpn
this.tag_v := virtual
this.level := level.extract(log2Ceil(pgLevels - superpageOnly.toInt)-1, 0)
val idx = sectorIdx(vpn)
valid(idx) := true.B
data(idx) := entry.asUInt
}
def invalidate(): Unit = { valid.foreach(_ := false.B) }
def invalidate(virtual: Bool): Unit = {
for ((v, e) <- valid zip entry_data)
when (tag_v === virtual) { v := false.B }
}
def invalidateVPN(vpn: UInt, virtual: Bool): Unit = {
if (superpage) {
when (hit(vpn, virtual)) { invalidate() }
} else {
when (sectorTagMatch(vpn, virtual)) {
for (((v, e), i) <- (valid zip entry_data).zipWithIndex)
when (tag_v === virtual && i.U === sectorIdx(vpn)) { v := false.B }
}
}
// For fragmented superpage mappings, we assume the worst (largest)
// case, and zap entries whose most-significant VPNs match
when (((tag_vpn ^ vpn) >> (pgLevelBits * (pgLevels - 1))) === 0.U) {
for ((v, e) <- valid zip entry_data)
when (tag_v === virtual && e.fragmented_superpage) { v := false.B }
}
}
def invalidateNonGlobal(virtual: Bool): Unit = {
for ((v, e) <- valid zip entry_data)
when (tag_v === virtual && !e.g) { v := false.B }
}
}
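// Sector-indexing note (informal): with nSectors = 4, sectorIdx selects vpn(1,0) and
// sectorTagMatch compares vpn >> 2 against tag_vpn >> 2, so four consecutive virtual
// pages share one tag and occupy the four data/valid slots of a single TLBEntry.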
/** TLB config
*
* @param nSets the number of sets of PTEs, follows [[ICacheParams.nSets]]
* @param nWays the total number of ways of PTEs, follows [[ICacheParams.nWays]]
* @param nSectors the number of ways in a single PTE TLBEntry
* @param nSuperpageEntries the number of SuperpageEntries
*/
case class TLBConfig(
nSets: Int,
nWays: Int,
nSectors: Int = 4,
nSuperpageEntries: Int = 4)
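// Illustrative sketch (parameter values are assumptions, not taken from this file):
// a small L1 DTLB might be configured as
//   TLBConfig(nSets = 1, nWays = 32, nSectors = 4, nSuperpageEntries = 4)
// which yields a single set of 32 / 4 = 8 sectored entries, each caching 4
// consecutive base-page PTEs, plus 4 fully associative superpage entries.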
/** =Overview=
* [[TLB]] is a TLB template which contains PMA logic and PMP checker.
*
* TLB caches PTE and accelerates the address translation process.
* When tlb miss happens, ask PTW(L2TLB) for Page Table Walk.
* Perform PMP and PMA check during the translation and throw exception if there were any.
*
* ==Cache Structure==
* - Sectored Entry (PTE)
* - set-associative or direct-mapped
* - nsets = [[TLBConfig.nSets]]
* - nways = [[TLBConfig.nWays]] / [[TLBConfig.nSectors]]
* - PTEEntry( sectors = [[TLBConfig.nSectors]] )
* - LRU(if set-associative)
*
* - Superpage Entry(superpage PTE)
* - fully associative
* - nsets = [[TLBConfig.nSuperpageEntries]]
* - PTEEntry(sectors = 1)
* - PseudoLRU
*
* - Special Entry(PTE across PMP)
* - nsets = 1
* - PTEEntry(sectors = 1)
*
* ==Address structure==
* {{{
* |vaddr |
* |ppn/vpn | pgIndex |
* | | |
* | |nSets |nSector | |}}}
*
* ==State Machine==
* {{{
* s_ready: ready to accept request from CPU.
 * s_request: L1TLB(this) missed; send a request to the PTW(L2TLB).
 * s_wait: wait for the PTW to refill the L1TLB.
 * s_wait_invalidate: L1TLB is waiting for a response from the PTW, but will discard that response.}}}
*
* ==PMP==
* pmp check
* - special_entry: always check
* - other entry: check on refill
*
* ==Note==
 * PMA consumes diplomacy parameters to generate the physical memory attribute checking logic.
 *
 * BOOM uses the Rocket ITLB, together with its own DTLB.
*
* Accelerators:{{{
* sha3: DTLB
* gemmini: DTLB
* hwacha: DTLB*2+ITLB}}}
* @param instruction true for ITLB, false for DTLB
 * @param lgMaxSize log2 of the maximum access size in bytes (@todo confirm granularity)
* @param cfg [[TLBConfig]]
* @param edge collect SoC metadata.
*/
class TLB(instruction: Boolean, lgMaxSize: Int, cfg: TLBConfig)(implicit edge: TLEdgeOut, p: Parameters) extends CoreModule()(p) {
override def desiredName = if (instruction) "ITLB" else "DTLB"
val io = IO(new Bundle {
/** request from Core */
val req = Flipped(Decoupled(new TLBReq(lgMaxSize)))
/** response to Core */
val resp = Output(new TLBResp(lgMaxSize))
/** SFence Input */
val sfence = Flipped(Valid(new SFenceReq))
/** IO to PTW */
val ptw = new TLBPTWIO
/** suppress a TLB refill, one cycle after a miss */
val kill = Input(Bool())
})
io.ptw.customCSRs := DontCare
val pageGranularityPMPs = pmpGranularity >= (1 << pgIdxBits)
val vpn = io.req.bits.vaddr(vaddrBits-1, pgIdxBits)
/** index for sectored_Entry */
val memIdx = vpn.extract(cfg.nSectors.log2 + cfg.nSets.log2 - 1, cfg.nSectors.log2)
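  // Illustrative note (assuming cfg.nSectors = 4 and cfg.nSets = 64): memIdx is
  // vpn(7, 2); vpn(1, 0) later selects the sector inside the matching TLBEntry,
  // and the remaining upper VPN bits are compared by sectorTagMatch.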
/** TLB Entry */
val sectored_entries = Reg(Vec(cfg.nSets, Vec(cfg.nWays / cfg.nSectors, new TLBEntry(cfg.nSectors, false, false))))
/** Superpage Entry */
val superpage_entries = Reg(Vec(cfg.nSuperpageEntries, new TLBEntry(1, true, true)))
/** Special Entry
*
   * If the PMP granularity is smaller than the page size, an additional "special" entry is needed to manage PMP checks.
*/
val special_entry = (!pageGranularityPMPs).option(Reg(new TLBEntry(1, true, false)))
def ordinary_entries = sectored_entries(memIdx) ++ superpage_entries
def all_entries = ordinary_entries ++ special_entry
def all_real_entries = sectored_entries.flatten ++ superpage_entries ++ special_entry
val s_ready :: s_request :: s_wait :: s_wait_invalidate :: Nil = Enum(4)
val state = RegInit(s_ready)
// use vpn as refill_tag
val r_refill_tag = Reg(UInt(vpnBits.W))
val r_superpage_repl_addr = Reg(UInt(log2Ceil(superpage_entries.size).W))
val r_sectored_repl_addr = Reg(UInt(log2Ceil(sectored_entries.head.size).W))
val r_sectored_hit = Reg(Valid(UInt(log2Ceil(sectored_entries.head.size).W)))
val r_superpage_hit = Reg(Valid(UInt(log2Ceil(superpage_entries.size).W)))
val r_vstage1_en = Reg(Bool())
val r_stage2_en = Reg(Bool())
val r_need_gpa = Reg(Bool())
val r_gpa_valid = Reg(Bool())
val r_gpa = Reg(UInt(vaddrBits.W))
val r_gpa_vpn = Reg(UInt(vpnBits.W))
val r_gpa_is_pte = Reg(Bool())
/** privilege mode */
val priv = io.req.bits.prv
val priv_v = usingHypervisor.B && io.req.bits.v
val priv_s = priv(0)
// user mode and supervisor mode
val priv_uses_vm = priv <= PRV.S.U
val satp = Mux(priv_v, io.ptw.vsatp, io.ptw.ptbr)
val stage1_en = usingVM.B && satp.mode(satp.mode.getWidth-1)
/** VS-stage translation enable */
val vstage1_en = usingHypervisor.B && priv_v && io.ptw.vsatp.mode(io.ptw.vsatp.mode.getWidth-1)
/** G-stage translation enable */
val stage2_en = usingHypervisor.B && priv_v && io.ptw.hgatp.mode(io.ptw.hgatp.mode.getWidth-1)
/** Enable Virtual Memory when:
* 1. statically configured
* 1. satp highest bits enabled
* i. RV32:
* - 0 -> Bare
* - 1 -> SV32
* i. RV64:
* - 0000 -> Bare
* - 1000 -> SV39
* - 1001 -> SV48
* - 1010 -> SV57
* - 1011 -> SV64
* 1. In virtualization mode, vsatp highest bits enabled
   * 1. the privilege mode is U or S.
   * 1. in H and M mode, VM is disabled.
   * 1. the request is not a passthrough (micro-architecturally defined).
*
* @see RV-priv spec 4.1.11 Supervisor Address Translation and Protection (satp) Register
* @see RV-priv spec 8.2.18 Virtual Supervisor Address Translation and Protection Register (vsatp)
*/
val vm_enabled = (stage1_en || stage2_en) && priv_uses_vm && !io.req.bits.passthrough
// flush guest entries on vsatp.MODE Bare <-> SvXX transitions
val v_entries_use_stage1 = RegInit(false.B)
val vsatp_mode_mismatch = priv_v && (vstage1_en =/= v_entries_use_stage1) && !io.req.bits.passthrough
// share a single physical memory attribute checker (unshare if critical path)
val refill_ppn = io.ptw.resp.bits.pte.ppn(ppnBits-1, 0)
/** refill signal */
val do_refill = usingVM.B && io.ptw.resp.valid
/** sfence invalidate refill */
val invalidate_refill = state.isOneOf(s_request /* don't care */, s_wait_invalidate) || io.sfence.valid
// PMP
val mpu_ppn = Mux(do_refill, refill_ppn,
Mux(vm_enabled && special_entry.nonEmpty.B, special_entry.map(e => e.ppn(vpn, e.getData(vpn))).getOrElse(0.U), io.req.bits.vaddr >> pgIdxBits))
val mpu_physaddr = Cat(mpu_ppn, io.req.bits.vaddr(pgIdxBits-1, 0))
val mpu_priv = Mux[UInt](usingVM.B && (do_refill || io.req.bits.passthrough /* PTW */), PRV.S.U, Cat(io.ptw.status.debug, priv))
val pmp = Module(new PMPChecker(lgMaxSize))
pmp.io.addr := mpu_physaddr
pmp.io.size := io.req.bits.size
pmp.io.pmp := (io.ptw.pmp: Seq[PMP])
pmp.io.prv := mpu_priv
val pma = Module(new PMAChecker(edge.manager)(p))
pma.io.paddr := mpu_physaddr
// todo: using DataScratchpad doesn't support cacheable.
val cacheable = pma.io.resp.cacheable && (instruction || !usingDataScratchpad).B
val homogeneous = TLBPageLookup(edge.manager.managers, xLen, p(CacheBlockBytes), BigInt(1) << pgIdxBits, 1 << lgMaxSize)(mpu_physaddr).homogeneous
  // deny non-debug-mode accesses to the Debug Module address range (e.g. the program buffer)
val deny_access_to_debug = mpu_priv <= PRV.M.U && p(DebugModuleKey).map(dmp => dmp.address.contains(mpu_physaddr)).getOrElse(false.B)
val prot_r = pma.io.resp.r && !deny_access_to_debug && pmp.io.r
val prot_w = pma.io.resp.w && !deny_access_to_debug && pmp.io.w
val prot_pp = pma.io.resp.pp
val prot_al = pma.io.resp.al
val prot_aa = pma.io.resp.aa
val prot_x = pma.io.resp.x && !deny_access_to_debug && pmp.io.x
val prot_eff = pma.io.resp.eff
// hit check
val sector_hits = sectored_entries(memIdx).map(_.sectorHit(vpn, priv_v))
val superpage_hits = superpage_entries.map(_.hit(vpn, priv_v))
val hitsVec = all_entries.map(vm_enabled && _.hit(vpn, priv_v))
val real_hits = hitsVec.asUInt
val hits = Cat(!vm_enabled, real_hits)
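  // Layout note: bits (all_entries.size - 1, 0) of `hits` follow all_entries order
  // (sectored entries of the indexed set, then superpage entries, then the optional
  // special entry); the MSB is the !vm_enabled passthrough case.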
// use ptw response to refill
// permission bit arrays
when (do_refill) {
val pte = io.ptw.resp.bits.pte
val refill_v = r_vstage1_en || r_stage2_en
val newEntry = Wire(new TLBEntryData)
newEntry.ppn := pte.ppn
newEntry.c := cacheable
newEntry.u := pte.u
newEntry.g := pte.g && pte.v
newEntry.ae_ptw := io.ptw.resp.bits.ae_ptw
newEntry.ae_final := io.ptw.resp.bits.ae_final
newEntry.ae_stage2 := io.ptw.resp.bits.ae_final && io.ptw.resp.bits.gpa_is_pte && r_stage2_en
newEntry.pf := io.ptw.resp.bits.pf
newEntry.gf := io.ptw.resp.bits.gf
newEntry.hr := io.ptw.resp.bits.hr
newEntry.hw := io.ptw.resp.bits.hw
newEntry.hx := io.ptw.resp.bits.hx
newEntry.sr := pte.sr()
newEntry.sw := pte.sw()
newEntry.sx := pte.sx()
newEntry.pr := prot_r
newEntry.pw := prot_w
newEntry.px := prot_x
newEntry.ppp := prot_pp
newEntry.pal := prot_al
newEntry.paa := prot_aa
newEntry.eff := prot_eff
newEntry.fragmented_superpage := io.ptw.resp.bits.fragmented_superpage
// refill special_entry
when (special_entry.nonEmpty.B && !io.ptw.resp.bits.homogeneous) {
special_entry.foreach(_.insert(r_refill_tag, refill_v, io.ptw.resp.bits.level, newEntry))
}.elsewhen (io.ptw.resp.bits.level < (pgLevels-1).U) {
val waddr = Mux(r_superpage_hit.valid && usingHypervisor.B, r_superpage_hit.bits, r_superpage_repl_addr)
for ((e, i) <- superpage_entries.zipWithIndex) when (r_superpage_repl_addr === i.U) {
e.insert(r_refill_tag, refill_v, io.ptw.resp.bits.level, newEntry)
when (invalidate_refill) { e.invalidate() }
}
    // otherwise, refill a sectored entry
}.otherwise {
val r_memIdx = r_refill_tag.extract(cfg.nSectors.log2 + cfg.nSets.log2 - 1, cfg.nSectors.log2)
val waddr = Mux(r_sectored_hit.valid, r_sectored_hit.bits, r_sectored_repl_addr)
for ((e, i) <- sectored_entries(r_memIdx).zipWithIndex) when (waddr === i.U) {
when (!r_sectored_hit.valid) { e.invalidate() }
e.insert(r_refill_tag, refill_v, 0.U, newEntry)
when (invalidate_refill) { e.invalidate() }
}
}
r_gpa_valid := io.ptw.resp.bits.gpa.valid
r_gpa := io.ptw.resp.bits.gpa.bits
r_gpa_is_pte := io.ptw.resp.bits.gpa_is_pte
}
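  // Refill target summary: a non-homogeneous page (one that spans differing PMP/PMA
  // regions) goes to special_entry, a superpage (level < pgLevels - 1) goes to a
  // superpage entry, and a base page goes to the sectored entry selected by
  // r_memIdx and waddr.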
  // read the data of all entries.
val entries = all_entries.map(_.getData(vpn))
val normal_entries = entries.take(ordinary_entries.size)
  // query the PPN from [[all_entries]] in parallel; if VM is not enabled, return the VPN instead
val ppn = Mux1H(hitsVec :+ !vm_enabled, (all_entries zip entries).map{ case (entry, data) => entry.ppn(vpn, data) } :+ vpn(ppnBits-1, 0))
val nPhysicalEntries = 1 + special_entry.size
  // access exceptions raised during the page table walk (one bit per entry).
val ptw_ae_array = Cat(false.B, entries.map(_.ae_ptw).asUInt)
val final_ae_array = Cat(false.B, entries.map(_.ae_final).asUInt)
val ptw_pf_array = Cat(false.B, entries.map(_.pf).asUInt)
val ptw_gf_array = Cat(false.B, entries.map(_.gf).asUInt)
val sum = Mux(priv_v, io.ptw.gstatus.sum, io.ptw.status.sum)
  // in user mode, only user pages are readable/writable.
  // in supervisor mode, "If the SUM bit in the sstatus register is set, supervisor mode software may also access pages with U=1." (from spec)
val priv_rw_ok = Mux(!priv_s || sum, entries.map(_.u).asUInt, 0.U) | Mux(priv_s, ~entries.map(_.u).asUInt, 0.U)
  // in supervisor (or machine) mode, only non-user pages are executable.
  // in user mode, only user pages are executable.
val priv_x_ok = Mux(priv_s, ~entries.map(_.u).asUInt, entries.map(_.u).asUInt)
val stage1_bypass = Fill(entries.size, usingHypervisor.B) & (Fill(entries.size, !stage1_en) | entries.map(_.ae_stage2).asUInt)
val mxr = io.ptw.status.mxr | Mux(priv_v, io.ptw.gstatus.mxr, false.B)
// "The vsstatus field MXR, which makes execute-only pages readable, only overrides VS-stage page protection.(from spec)"
val r_array = Cat(true.B, (priv_rw_ok & (entries.map(_.sr).asUInt | Mux(mxr, entries.map(_.sx).asUInt, 0.U))) | stage1_bypass)
val w_array = Cat(true.B, (priv_rw_ok & entries.map(_.sw).asUInt) | stage1_bypass)
val x_array = Cat(true.B, (priv_x_ok & entries.map(_.sx).asUInt) | stage1_bypass)
val stage2_bypass = Fill(entries.size, !stage2_en)
val hr_array = Cat(true.B, entries.map(_.hr).asUInt | Mux(io.ptw.status.mxr, entries.map(_.hx).asUInt, 0.U) | stage2_bypass)
val hw_array = Cat(true.B, entries.map(_.hw).asUInt | stage2_bypass)
val hx_array = Cat(true.B, entries.map(_.hx).asUInt | stage2_bypass)
  // These arrays have one bit per TLB entry.
// user mode can read: PMA OK, TLB OK, AE OK
val pr_array = Cat(Fill(nPhysicalEntries, prot_r), normal_entries.map(_.pr).asUInt) & ~(ptw_ae_array | final_ae_array)
// user mode can write: PMA OK, TLB OK, AE OK
val pw_array = Cat(Fill(nPhysicalEntries, prot_w), normal_entries.map(_.pw).asUInt) & ~(ptw_ae_array | final_ae_array)
  // can execute: PMA OK, TLB OK, AE OK
val px_array = Cat(Fill(nPhysicalEntries, prot_x), normal_entries.map(_.px).asUInt) & ~(ptw_ae_array | final_ae_array)
// put effect
val eff_array = Cat(Fill(nPhysicalEntries, prot_eff), normal_entries.map(_.eff).asUInt)
// cacheable
val c_array = Cat(Fill(nPhysicalEntries, cacheable), normal_entries.map(_.c).asUInt)
// put partial
val ppp_array = Cat(Fill(nPhysicalEntries, prot_pp), normal_entries.map(_.ppp).asUInt)
// atomic arithmetic
val paa_array = Cat(Fill(nPhysicalEntries, prot_aa), normal_entries.map(_.paa).asUInt)
// atomic logic
val pal_array = Cat(Fill(nPhysicalEntries, prot_al), normal_entries.map(_.pal).asUInt)
val ppp_array_if_cached = ppp_array | c_array
val paa_array_if_cached = paa_array | (if(usingAtomicsInCache) c_array else 0.U)
val pal_array_if_cached = pal_array | (if(usingAtomicsInCache) c_array else 0.U)
val prefetchable_array = Cat((cacheable && homogeneous) << (nPhysicalEntries-1), normal_entries.map(_.c).asUInt)
  // the access is misaligned if any vaddr bit below the access size is set
val misaligned = (io.req.bits.vaddr & (UIntToOH(io.req.bits.size) - 1.U)).orR
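  // Illustrative note: size encodes log2(bytes), so for a 4-byte access size = 2,
  // UIntToOH(2.U) - 1.U = "b011", and the access is misaligned whenever
  // vaddr(1, 0) =/= 0; an 8-byte access checks vaddr(2, 0) instead.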
def badVA(guestPA: Boolean): Bool = {
val additionalPgLevels = (if (guestPA) io.ptw.hgatp else satp).additionalPgLevels
val extraBits = if (guestPA) hypervisorExtraAddrBits else 0
val signed = !guestPA
val nPgLevelChoices = pgLevels - minPgLevels + 1
val minVAddrBits = pgIdxBits + minPgLevels * pgLevelBits + extraBits
(for (i <- 0 until nPgLevelChoices) yield {
val mask = ((BigInt(1) << vaddrBitsExtended) - (BigInt(1) << (minVAddrBits + i * pgLevelBits - signed.toInt))).U
val maskedVAddr = io.req.bits.vaddr & mask
additionalPgLevels === i.U && !(maskedVAddr === 0.U || signed.B && maskedVAddr === mask)
}).orR
}
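  // Illustrative note (assuming Sv39, i.e. additionalPgLevels = 0, with a signed VA):
  // the mask above covers all vaddr bits at and above bit 38, so the address is
  // "bad" unless those bits are all zeros or all ones, i.e. properly sign-extended
  // from bit 38.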
val bad_gpa =
if (!usingHypervisor) false.B
else vm_enabled && !stage1_en && badVA(true)
val bad_va =
if (!usingVM || (minPgLevels == pgLevels && vaddrBits == vaddrBitsExtended)) false.B
else vm_enabled && stage1_en && badVA(false)
val cmd_lrsc = usingAtomics.B && io.req.bits.cmd.isOneOf(M_XLR, M_XSC)
val cmd_amo_logical = usingAtomics.B && isAMOLogical(io.req.bits.cmd)
val cmd_amo_arithmetic = usingAtomics.B && isAMOArithmetic(io.req.bits.cmd)
val cmd_put_partial = io.req.bits.cmd === M_PWR
val cmd_read = isRead(io.req.bits.cmd)
val cmd_readx = usingHypervisor.B && io.req.bits.cmd === M_HLVX
val cmd_write = isWrite(io.req.bits.cmd)
val cmd_write_perms = cmd_write ||
io.req.bits.cmd.isOneOf(M_FLUSH_ALL, M_WOK) // not a write, but needs write permissions
val lrscAllowed = Mux((usingDataScratchpad || usingAtomicsOnlyForIO).B, 0.U, c_array)
val ae_array =
Mux(misaligned, eff_array, 0.U) |
Mux(cmd_lrsc, ~lrscAllowed, 0.U)
// access exception needs SoC information from PMA
val ae_ld_array = Mux(cmd_read, ae_array | ~pr_array, 0.U)
val ae_st_array =
Mux(cmd_write_perms, ae_array | ~pw_array, 0.U) |
Mux(cmd_put_partial, ~ppp_array_if_cached, 0.U) |
Mux(cmd_amo_logical, ~pal_array_if_cached, 0.U) |
Mux(cmd_amo_arithmetic, ~paa_array_if_cached, 0.U)
val must_alloc_array =
Mux(cmd_put_partial, ~ppp_array, 0.U) |
Mux(cmd_amo_logical, ~pal_array, 0.U) |
Mux(cmd_amo_arithmetic, ~paa_array, 0.U) |
Mux(cmd_lrsc, ~0.U(pal_array.getWidth.W), 0.U)
val pf_ld_array = Mux(cmd_read, ((~Mux(cmd_readx, x_array, r_array) & ~ptw_ae_array) | ptw_pf_array) & ~ptw_gf_array, 0.U)
val pf_st_array = Mux(cmd_write_perms, ((~w_array & ~ptw_ae_array) | ptw_pf_array) & ~ptw_gf_array, 0.U)
val pf_inst_array = ((~x_array & ~ptw_ae_array) | ptw_pf_array) & ~ptw_gf_array
val gf_ld_array = Mux(priv_v && cmd_read, (~Mux(cmd_readx, hx_array, hr_array) | ptw_gf_array) & ~ptw_ae_array, 0.U)
val gf_st_array = Mux(priv_v && cmd_write_perms, (~hw_array | ptw_gf_array) & ~ptw_ae_array, 0.U)
val gf_inst_array = Mux(priv_v, (~hx_array | ptw_gf_array) & ~ptw_ae_array, 0.U)
val gpa_hits = {
val need_gpa_mask = if (instruction) gf_inst_array else gf_ld_array | gf_st_array
val hit_mask = Fill(ordinary_entries.size, r_gpa_valid && r_gpa_vpn === vpn) | Fill(all_entries.size, !vstage1_en)
hit_mask | ~need_gpa_mask(all_entries.size-1, 0)
}
val tlb_hit_if_not_gpa_miss = real_hits.orR
val tlb_hit = (real_hits & gpa_hits).orR
// leads to s_request
val tlb_miss = vm_enabled && !vsatp_mode_mismatch && !bad_va && !tlb_hit
val sectored_plru = new SetAssocLRU(cfg.nSets, sectored_entries.head.size, "plru")
val superpage_plru = new PseudoLRU(superpage_entries.size)
when (io.req.valid && vm_enabled) {
// replace
when (sector_hits.orR) { sectored_plru.access(memIdx, OHToUInt(sector_hits)) }
when (superpage_hits.orR) { superpage_plru.access(OHToUInt(superpage_hits)) }
}
// Superpages create the possibility that two entries in the TLB may match.
// This corresponds to a software bug, but we can't return complete garbage;
// we must return either the old translation or the new translation. This
// isn't compatible with the Mux1H approach. So, flush the TLB and report
// a miss on duplicate entries.
val multipleHits = PopCountAtLeast(real_hits, 2)
  // only assert req.ready in the s_ready state.
io.req.ready := state === s_ready
// page fault
io.resp.pf.ld := (bad_va && cmd_read) || (pf_ld_array & hits).orR
io.resp.pf.st := (bad_va && cmd_write_perms) || (pf_st_array & hits).orR
io.resp.pf.inst := bad_va || (pf_inst_array & hits).orR
// guest page fault
io.resp.gf.ld := (bad_gpa && cmd_read) || (gf_ld_array & hits).orR
io.resp.gf.st := (bad_gpa && cmd_write_perms) || (gf_st_array & hits).orR
io.resp.gf.inst := bad_gpa || (gf_inst_array & hits).orR
// access exception
io.resp.ae.ld := (ae_ld_array & hits).orR
io.resp.ae.st := (ae_st_array & hits).orR
io.resp.ae.inst := (~px_array & hits).orR
// misaligned
io.resp.ma.ld := misaligned && cmd_read
io.resp.ma.st := misaligned && cmd_write
io.resp.ma.inst := false.B // this is up to the pipeline to figure out
io.resp.cacheable := (c_array & hits).orR
io.resp.must_alloc := (must_alloc_array & hits).orR
io.resp.prefetchable := (prefetchable_array & hits).orR && edge.manager.managers.forall(m => !m.supportsAcquireB || m.supportsHint).B
io.resp.miss := do_refill || vsatp_mode_mismatch || tlb_miss || multipleHits
io.resp.paddr := Cat(ppn, io.req.bits.vaddr(pgIdxBits-1, 0))
io.resp.size := io.req.bits.size
io.resp.cmd := io.req.bits.cmd
io.resp.gpa_is_pte := vstage1_en && r_gpa_is_pte
io.resp.gpa := {
val page = Mux(!vstage1_en, Cat(bad_gpa, vpn), r_gpa >> pgIdxBits)
val offset = Mux(io.resp.gpa_is_pte, r_gpa(pgIdxBits-1, 0), io.req.bits.vaddr(pgIdxBits-1, 0))
Cat(page, offset)
}
io.ptw.req.valid := state === s_request
io.ptw.req.bits.valid := !io.kill
io.ptw.req.bits.bits.addr := r_refill_tag
io.ptw.req.bits.bits.vstage1 := r_vstage1_en
io.ptw.req.bits.bits.stage2 := r_stage2_en
io.ptw.req.bits.bits.need_gpa := r_need_gpa
if (usingVM) {
when(io.ptw.req.fire && io.ptw.req.bits.valid) {
r_gpa_valid := false.B
r_gpa_vpn := r_refill_tag
}
val sfence = io.sfence.valid
    // In [[s_ready]], hit/miss is resolved in the first cycle;
    // on a miss, request the PTW (L2 TLB).
when (io.req.fire && tlb_miss) {
state := s_request
r_refill_tag := vpn
r_need_gpa := tlb_hit_if_not_gpa_miss
r_vstage1_en := vstage1_en
r_stage2_en := stage2_en
r_superpage_repl_addr := replacementEntry(superpage_entries, superpage_plru.way)
r_sectored_repl_addr := replacementEntry(sectored_entries(memIdx), sectored_plru.way(memIdx))
r_sectored_hit.valid := sector_hits.orR
r_sectored_hit.bits := OHToUInt(sector_hits)
r_superpage_hit.valid := superpage_hits.orR
r_superpage_hit.bits := OHToUInt(superpage_hits)
}
    // Handle SFENCE.VMA while a request to the PTW is pending.
    // SFENCE.VMA    io.ptw.req.ready     kill
    //  ?             ?                   1 -> s_ready
    //  0             0                   0 -> s_request (keep waiting)
    //  0             1                   0 -> s_wait
    //  1             1                   0 -> s_wait_invalidate
    //  1             0                   0 -> s_ready
when (state === s_request) {
// SFENCE.VMA will kill TLB entries based on rs1 and rs2. It will take 1 cycle.
when (sfence) { state := s_ready }
// here should be io.ptw.req.fire, but assert(io.ptw.req.ready === true.B)
// fire -> s_wait
when (io.ptw.req.ready) { state := Mux(sfence, s_wait_invalidate, s_wait) }
// If CPU kills request(frontend.s2_redirect)
when (io.kill) { state := s_ready }
}
    // an sfence during refill results in invalidating the refilled entry
when (state === s_wait && sfence) {
state := s_wait_invalidate
}
    // once the PTW response arrives, go back to s_ready.
when (io.ptw.resp.valid) {
state := s_ready
}
// SFENCE processing logic.
when (sfence) {
assert(!io.sfence.bits.rs1 || (io.sfence.bits.addr >> pgIdxBits) === vpn)
for (e <- all_real_entries) {
val hv = usingHypervisor.B && io.sfence.bits.hv
val hg = usingHypervisor.B && io.sfence.bits.hg
when (!hg && io.sfence.bits.rs1) { e.invalidateVPN(vpn, hv) }
.elsewhen (!hg && io.sfence.bits.rs2) { e.invalidateNonGlobal(hv) }
.otherwise { e.invalidate(hv || hg) }
}
}
when(io.req.fire && vsatp_mode_mismatch) {
all_real_entries.foreach(_.invalidate(true.B))
v_entries_use_stage1 := vstage1_en
}
when (multipleHits || reset.asBool) {
all_real_entries.foreach(_.invalidate())
}
ccover(io.ptw.req.fire, "MISS", "TLB miss")
ccover(io.ptw.req.valid && !io.ptw.req.ready, "PTW_STALL", "TLB miss, but PTW busy")
ccover(state === s_wait_invalidate, "SFENCE_DURING_REFILL", "flush TLB during TLB refill")
ccover(sfence && !io.sfence.bits.rs1 && !io.sfence.bits.rs2, "SFENCE_ALL", "flush TLB")
ccover(sfence && !io.sfence.bits.rs1 && io.sfence.bits.rs2, "SFENCE_ASID", "flush TLB ASID")
ccover(sfence && io.sfence.bits.rs1 && !io.sfence.bits.rs2, "SFENCE_LINE", "flush TLB line")
ccover(sfence && io.sfence.bits.rs1 && io.sfence.bits.rs2, "SFENCE_LINE_ASID", "flush TLB line/ASID")
ccover(multipleHits, "MULTIPLE_HITS", "Two matching translations in TLB")
}
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
property.cover(cond, s"${if (instruction) "I" else "D"}TLB_$label", "MemorySystem;;" + desc)
  /** Decides which entry to replace
   *
   * If there is an invalid entry, pick it with a priority encoder;
   * otherwise, replace the entry indicated by alt (the replacement policy's choice).
   *
   * @return index of the TLBEntry to replace
   */
def replacementEntry(set: Seq[TLBEntry], alt: UInt) = {
val valids = set.map(_.valid.orR).asUInt
Mux(valids.andR, alt, PriorityEncoder(~valids))
}
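  // Illustrative note: with set valids = "b0110" (entries 1 and 2 valid),
  // PriorityEncoder(~valids) selects entry 0; once every entry is valid, the
  // replacement policy's choice `alt` is used instead.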
}
File DCache.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.amba.AMBAProt
import freechips.rocketchip.diplomacy.{BufferParams}
import freechips.rocketchip.prci.{ClockCrossingType, RationalCrossing, SynchronousCrossing, AsynchronousCrossing, CreditedCrossing}
import freechips.rocketchip.tile.{CoreBundle, LookupByHartId}
import freechips.rocketchip.tilelink.{TLFIFOFixer,ClientMetadata, TLBundleA, TLAtomics, TLBundleB, TLPermissions}
import freechips.rocketchip.tilelink.TLMessages.{AccessAck, HintAck, AccessAckData, Grant, GrantData, ReleaseAck}
import freechips.rocketchip.util.{CanHaveErrors, ClockGate, IdentityCode, ReplacementPolicy, DescribedSRAM, property}
import freechips.rocketchip.util.BooleanToAugmentedBoolean
import freechips.rocketchip.util.UIntToAugmentedUInt
import freechips.rocketchip.util.UIntIsOneOf
import freechips.rocketchip.util.IntToAugmentedInt
import freechips.rocketchip.util.SeqToAugmentedSeq
import freechips.rocketchip.util.SeqBoolBitwiseOps
// TODO: delete this trait once deduplication is smart enough to avoid globally inlining matching circuits
trait InlineInstance { self: chisel3.experimental.BaseModule =>
chisel3.experimental.annotate(
new chisel3.experimental.ChiselAnnotation {
def toFirrtl: firrtl.annotations.Annotation = firrtl.passes.InlineAnnotation(self.toNamed) } )
}
class DCacheErrors(implicit p: Parameters) extends L1HellaCacheBundle()(p)
with CanHaveErrors {
val correctable = (cacheParams.tagCode.canCorrect || cacheParams.dataCode.canCorrect).option(Valid(UInt(paddrBits.W)))
val uncorrectable = (cacheParams.tagCode.canDetect || cacheParams.dataCode.canDetect).option(Valid(UInt(paddrBits.W)))
val bus = Valid(UInt(paddrBits.W))
}
class DCacheDataReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) {
val addr = UInt(untagBits.W)
val write = Bool()
val wdata = UInt((encBits * rowBytes / eccBytes).W)
val wordMask = UInt((rowBytes / subWordBytes).W)
val eccMask = UInt((wordBytes / eccBytes).W)
val way_en = UInt(nWays.W)
}
class DCacheDataArray(implicit p: Parameters) extends L1HellaCacheModule()(p) {
val io = IO(new Bundle {
val req = Flipped(Valid(new DCacheDataReq))
val resp = Output(Vec(nWays, UInt((req.bits.wdata.getWidth).W)))
})
require(rowBits % subWordBits == 0, "rowBits must be a multiple of subWordBits")
val eccMask = if (eccBits == subWordBits) Seq(true.B) else io.req.bits.eccMask.asBools
val wMask = if (nWays == 1) eccMask else (0 until nWays).flatMap(i => eccMask.map(_ && io.req.bits.way_en(i)))
val wWords = io.req.bits.wdata.grouped(encBits * (subWordBits / eccBits))
val addr = io.req.bits.addr >> rowOffBits
val data_arrays = Seq.tabulate(rowBits / subWordBits) {
i =>
DescribedSRAM(
name = s"${tileParams.baseName}_dcache_data_arrays_${i}",
desc = "DCache Data Array",
size = nSets * cacheBlockBytes / rowBytes,
data = Vec(nWays * (subWordBits / eccBits), UInt(encBits.W))
)
}
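  // Illustrative note (assumed geometry: rowBits = 128, subWordBits = 64): two SRAM
  // macros are generated, each row holding nWays * (subWordBits / eccBits)
  // ECC-encoded chunks, so a cache row is striped across the macros and the way is
  // selected by the write mask computed above.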
val rdata = for ((array , i) <- data_arrays.zipWithIndex) yield {
val valid = io.req.valid && ((data_arrays.size == 1).B || io.req.bits.wordMask(i))
when (valid && io.req.bits.write) {
val wMaskSlice = (0 until wMask.size).filter(j => i % (wordBits/subWordBits) == (j % (wordBytes/eccBytes)) / (subWordBytes/eccBytes)).map(wMask(_))
val wData = wWords(i).grouped(encBits)
array.write(addr, VecInit((0 until nWays).flatMap(i => wData)), wMaskSlice)
}
val data = array.read(addr, valid && !io.req.bits.write)
data.grouped(subWordBits / eccBits).map(_.asUInt).toSeq
}
(io.resp zip rdata.transpose).foreach { case (resp, data) => resp := data.asUInt }
}
class DCacheMetadataReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) {
val write = Bool()
val addr = UInt(vaddrBitsExtended.W)
val idx = UInt(idxBits.W)
val way_en = UInt(nWays.W)
val data = UInt(cacheParams.tagCode.width(new L1Metadata().getWidth).W)
}
class DCache(staticIdForMetadataUseOnly: Int, val crossing: ClockCrossingType)(implicit p: Parameters) extends HellaCache(staticIdForMetadataUseOnly)(p) {
override lazy val module = new DCacheModule(this)
}
class DCacheTLBPort(implicit p: Parameters) extends CoreBundle()(p) {
val req = Flipped(Decoupled(new TLBReq(coreDataBytes.log2)))
val s1_resp = Output(new TLBResp(coreDataBytes.log2))
val s2_kill = Input(Bool())
}
class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
val tECC = cacheParams.tagCode
val dECC = cacheParams.dataCode
require(subWordBits % eccBits == 0, "subWordBits must be a multiple of eccBits")
require(eccBytes == 1 || !dECC.isInstanceOf[IdentityCode])
require(cacheParams.silentDrop || cacheParams.acquireBeforeRelease, "!silentDrop requires acquireBeforeRelease")
val usingRMW = eccBytes > 1 || usingAtomicsInCache
val mmioOffset = outer.firstMMIO
edge.manager.requireFifo(TLFIFOFixer.allVolatile) // TileLink pipelining MMIO requests
val clock_en_reg = Reg(Bool())
io.cpu.clock_enabled := clock_en_reg
val gated_clock =
if (!cacheParams.clockGate) clock
else ClockGate(clock, clock_en_reg, "dcache_clock_gate")
class DCacheModuleImpl { // entering gated-clock domain
val tlb = Module(new TLB(false, log2Ceil(coreDataBytes), TLBConfig(nTLBSets, nTLBWays, cacheParams.nTLBBasePageSectors, cacheParams.nTLBSuperpages)))
val pma_checker = Module(new TLB(false, log2Ceil(coreDataBytes), TLBConfig(nTLBSets, nTLBWays, cacheParams.nTLBBasePageSectors, cacheParams.nTLBSuperpages)) with InlineInstance)
// tags
val replacer = ReplacementPolicy.fromString(cacheParams.replacementPolicy, nWays)
/** Metadata Arbiter:
* 0: Tag update on reset
* 1: Tag update on ECC error
* 2: Tag update on hit
* 3: Tag update on refill
* 4: Tag update on release
* 5: Tag update on flush
* 6: Tag update on probe
* 7: Tag update on CPU request
*/
val metaArb = Module(new Arbiter(new DCacheMetadataReq, 8) with InlineInstance)
val tag_array = DescribedSRAM(
name = s"${tileParams.baseName}_dcache_tag_array",
desc = "DCache Tag Array",
size = nSets,
data = Vec(nWays, chiselTypeOf(metaArb.io.out.bits.data))
)
// data
val data = Module(new DCacheDataArray)
/** Data Arbiter
* 0: data from pending store buffer
* 1: data from TL-D refill
* 2: release to TL-A
* 3: hit path to CPU
*/
val dataArb = Module(new Arbiter(new DCacheDataReq, 4) with InlineInstance)
dataArb.io.in.tail.foreach(_.bits.wdata := dataArb.io.in.head.bits.wdata) // tie off write ports by default
data.io.req.bits <> dataArb.io.out.bits
data.io.req.valid := dataArb.io.out.valid
dataArb.io.out.ready := true.B
metaArb.io.out.ready := clock_en_reg
val tl_out_a = Wire(chiselTypeOf(tl_out.a))
tl_out.a <> {
val a_queue_depth = outer.crossing match {
case RationalCrossing(_) => // TODO make this depend on the actual ratio?
if (cacheParams.separateUncachedResp) (maxUncachedInFlight + 1) / 2
else 2 min maxUncachedInFlight-1
case SynchronousCrossing(BufferParams.none) => 1 // Need some buffering to guarantee livelock freedom
case SynchronousCrossing(_) => 0 // Adequate buffering within the crossing
case _: AsynchronousCrossing => 0 // Adequate buffering within the crossing
case _: CreditedCrossing => 0 // Adequate buffering within the crossing
}
Queue(tl_out_a, a_queue_depth, flow = true)
}
val (tl_out_c, release_queue_empty) =
if (cacheParams.acquireBeforeRelease) {
val q = Module(new Queue(chiselTypeOf(tl_out.c.bits), cacheDataBeats, flow = true))
tl_out.c <> q.io.deq
(q.io.enq, q.io.count === 0.U)
} else {
(tl_out.c, true.B)
}
val s1_valid = RegNext(io.cpu.req.fire, false.B)
val s1_probe = RegNext(tl_out.b.fire, false.B)
val probe_bits = RegEnable(tl_out.b.bits, tl_out.b.fire) // TODO has data now :(
val s1_nack = WireDefault(false.B)
val s1_valid_masked = s1_valid && !io.cpu.s1_kill
val s1_valid_not_nacked = s1_valid && !s1_nack
val s1_tlb_req_valid = RegNext(io.tlb_port.req.fire, false.B)
val s2_tlb_req_valid = RegNext(s1_tlb_req_valid, false.B)
val s0_clk_en = metaArb.io.out.valid && !metaArb.io.out.bits.write
val s0_req = WireInit(io.cpu.req.bits)
s0_req.addr := Cat(metaArb.io.out.bits.addr >> blockOffBits, io.cpu.req.bits.addr(blockOffBits-1,0))
s0_req.idx.foreach(_ := Cat(metaArb.io.out.bits.idx, s0_req.addr(blockOffBits-1, 0)))
when (!metaArb.io.in(7).ready) { s0_req.phys := true.B }
val s1_req = RegEnable(s0_req, s0_clk_en)
val s1_vaddr = Cat(s1_req.idx.getOrElse(s1_req.addr) >> tagLSB, s1_req.addr(tagLSB-1, 0))
val s0_tlb_req = WireInit(io.tlb_port.req.bits)
when (!io.tlb_port.req.fire) {
s0_tlb_req.passthrough := s0_req.phys
s0_tlb_req.vaddr := s0_req.addr
s0_tlb_req.size := s0_req.size
s0_tlb_req.cmd := s0_req.cmd
s0_tlb_req.prv := s0_req.dprv
s0_tlb_req.v := s0_req.dv
}
val s1_tlb_req = RegEnable(s0_tlb_req, s0_clk_en || io.tlb_port.req.valid)
val s1_read = isRead(s1_req.cmd)
val s1_write = isWrite(s1_req.cmd)
val s1_readwrite = s1_read || s1_write
val s1_sfence = s1_req.cmd === M_SFENCE || s1_req.cmd === M_HFENCEV || s1_req.cmd === M_HFENCEG
val s1_flush_line = s1_req.cmd === M_FLUSH_ALL && s1_req.size(0)
val s1_flush_valid = Reg(Bool())
val s1_waw_hazard = Wire(Bool())
val s_ready :: s_voluntary_writeback :: s_probe_rep_dirty :: s_probe_rep_clean :: s_probe_retry :: s_probe_rep_miss :: s_voluntary_write_meta :: s_probe_write_meta :: s_dummy :: s_voluntary_release :: Nil = Enum(10)
val supports_flush = outer.flushOnFenceI || coreParams.haveCFlush
val flushed = RegInit(true.B)
val flushing = RegInit(false.B)
val flushing_req = Reg(chiselTypeOf(s1_req))
val cached_grant_wait = RegInit(false.B)
val resetting = RegInit(false.B)
val flushCounter = RegInit((nSets * (nWays-1)).U(log2Ceil(nSets * nWays).W))
val release_ack_wait = RegInit(false.B)
val release_ack_addr = Reg(UInt(paddrBits.W))
val release_state = RegInit(s_ready)
val refill_way = Reg(UInt())
val any_pstore_valid = Wire(Bool())
val inWriteback = release_state.isOneOf(s_voluntary_writeback, s_probe_rep_dirty)
val releaseWay = Wire(UInt())
io.cpu.req.ready := (release_state === s_ready) && !cached_grant_wait && !s1_nack
// I/O MSHRs
val uncachedInFlight = RegInit(VecInit(Seq.fill(maxUncachedInFlight)(false.B)))
val uncachedReqs = Reg(Vec(maxUncachedInFlight, new HellaCacheReq))
val uncachedResp = WireInit(new HellaCacheReq, DontCare)
// hit initiation path
val s0_read = isRead(io.cpu.req.bits.cmd)
dataArb.io.in(3).valid := io.cpu.req.valid && likelyNeedsRead(io.cpu.req.bits)
dataArb.io.in(3).bits := dataArb.io.in(1).bits
dataArb.io.in(3).bits.write := false.B
dataArb.io.in(3).bits.addr := Cat(io.cpu.req.bits.idx.getOrElse(io.cpu.req.bits.addr) >> tagLSB, io.cpu.req.bits.addr(tagLSB-1, 0))
dataArb.io.in(3).bits.wordMask := {
val mask = (subWordBytes.log2 until rowOffBits).foldLeft(1.U) { case (in, i) =>
val upper_mask = Mux((i >= wordBytes.log2).B || io.cpu.req.bits.size <= i.U, 0.U,
((BigInt(1) << (1 << (i - subWordBytes.log2)))-1).U)
val upper = Mux(io.cpu.req.bits.addr(i), in, 0.U) | upper_mask
val lower = Mux(io.cpu.req.bits.addr(i), 0.U, in)
upper ## lower
}
Fill(subWordBytes / eccBytes, mask)
}
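    // Illustrative note (assumed parameters: rowBytes = 32, wordBytes = subWordBytes = 8):
    // the fold walks addr bits 3 and 4 and builds a one-hot word-select mask; e.g.
    // addr(4, 3) = 2 yields mask = "b0100", reading only the third 64-bit word of
    // the row. The upper_mask/size term only widens the selection when
    // subWordBytes < wordBytes.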
dataArb.io.in(3).bits.eccMask := ~0.U((wordBytes / eccBytes).W)
dataArb.io.in(3).bits.way_en := ~0.U(nWays.W)
when (!dataArb.io.in(3).ready && s0_read) { io.cpu.req.ready := false.B }
val s1_did_read = RegEnable(dataArb.io.in(3).ready && (io.cpu.req.valid && needsRead(io.cpu.req.bits)), s0_clk_en)
val s1_read_mask = RegEnable(dataArb.io.in(3).bits.wordMask, s0_clk_en)
metaArb.io.in(7).valid := io.cpu.req.valid
metaArb.io.in(7).bits.write := false.B
metaArb.io.in(7).bits.idx := dataArb.io.in(3).bits.addr(idxMSB, idxLSB)
metaArb.io.in(7).bits.addr := io.cpu.req.bits.addr
metaArb.io.in(7).bits.way_en := metaArb.io.in(4).bits.way_en
metaArb.io.in(7).bits.data := metaArb.io.in(4).bits.data
when (!metaArb.io.in(7).ready) { io.cpu.req.ready := false.B }
// address translation
val s1_cmd_uses_tlb = s1_readwrite || s1_flush_line || s1_req.cmd === M_WOK
io.ptw <> tlb.io.ptw
tlb.io.kill := io.cpu.s2_kill || s2_tlb_req_valid && io.tlb_port.s2_kill
tlb.io.req.valid := s1_tlb_req_valid || s1_valid && !io.cpu.s1_kill && s1_cmd_uses_tlb
tlb.io.req.bits := s1_tlb_req
when (!tlb.io.req.ready && !tlb.io.ptw.resp.valid && !io.cpu.req.bits.phys) { io.cpu.req.ready := false.B }
when (!s1_tlb_req_valid && s1_valid && s1_cmd_uses_tlb && tlb.io.resp.miss) { s1_nack := true.B }
tlb.io.sfence.valid := s1_valid && !io.cpu.s1_kill && s1_sfence
tlb.io.sfence.bits.rs1 := s1_req.size(0)
tlb.io.sfence.bits.rs2 := s1_req.size(1)
tlb.io.sfence.bits.asid := io.cpu.s1_data.data
tlb.io.sfence.bits.addr := s1_req.addr
tlb.io.sfence.bits.hv := s1_req.cmd === M_HFENCEV
tlb.io.sfence.bits.hg := s1_req.cmd === M_HFENCEG
io.tlb_port.req.ready := clock_en_reg
io.tlb_port.s1_resp := tlb.io.resp
when (s1_tlb_req_valid && s1_valid && !(s1_req.phys && s1_req.no_xcpt)) { s1_nack := true.B }
pma_checker.io <> DontCare
pma_checker.io.req.bits.passthrough := true.B
pma_checker.io.req.bits.vaddr := s1_req.addr
pma_checker.io.req.bits.size := s1_req.size
pma_checker.io.req.bits.cmd := s1_req.cmd
pma_checker.io.req.bits.prv := s1_req.dprv
pma_checker.io.req.bits.v := s1_req.dv
val s1_paddr = Cat(Mux(s1_tlb_req_valid, s1_req.addr(paddrBits-1, pgIdxBits), tlb.io.resp.paddr >> pgIdxBits), s1_req.addr(pgIdxBits-1, 0))
val s1_victim_way = Wire(UInt())
val (s1_hit_way, s1_hit_state, s1_meta) =
if (usingDataScratchpad) {
val baseAddr = p(LookupByHartId)(_.dcache.flatMap(_.scratch.map(_.U)), io_hartid.get) | io_mmio_address_prefix.get
val inScratchpad = s1_paddr >= baseAddr && s1_paddr < baseAddr + (nSets * cacheBlockBytes).U
val hitState = Mux(inScratchpad, ClientMetadata.maximum, ClientMetadata.onReset)
val dummyMeta = L1Metadata(0.U, ClientMetadata.onReset)
(inScratchpad, hitState, Seq(tECC.encode(dummyMeta.asUInt)))
} else {
val metaReq = metaArb.io.out
val metaIdx = metaReq.bits.idx
when (metaReq.valid && metaReq.bits.write) {
val wmask = if (nWays == 1) Seq(true.B) else metaReq.bits.way_en.asBools
tag_array.write(metaIdx, VecInit(Seq.fill(nWays)(metaReq.bits.data)), wmask)
}
val s1_meta = tag_array.read(metaIdx, metaReq.valid && !metaReq.bits.write)
val s1_meta_uncorrected = s1_meta.map(tECC.decode(_).uncorrected.asTypeOf(new L1Metadata))
val s1_tag = s1_paddr >> tagLSB
val s1_meta_hit_way = s1_meta_uncorrected.map(r => r.coh.isValid() && r.tag === s1_tag).asUInt
val s1_meta_hit_state = (
s1_meta_uncorrected.map(r => Mux(r.tag === s1_tag && !s1_flush_valid, r.coh.asUInt, 0.U))
.reduce (_|_)).asTypeOf(chiselTypeOf(ClientMetadata.onReset))
(s1_meta_hit_way, s1_meta_hit_state, s1_meta)
}
val s1_data_way = WireDefault(if (nWays == 1) 1.U else Mux(inWriteback, releaseWay, s1_hit_way))
val tl_d_data_encoded = Wire(chiselTypeOf(encodeData(tl_out.d.bits.data, false.B)))
val s1_all_data_ways = VecInit(data.io.resp ++ (!cacheParams.separateUncachedResp).option(tl_d_data_encoded))
val s1_mask_xwr = new StoreGen(s1_req.size, s1_req.addr, 0.U, wordBytes).mask
val s1_mask = Mux(s1_req.cmd === M_PWR, io.cpu.s1_data.mask, s1_mask_xwr)
// for partial writes, s1_data.mask must be a subset of s1_mask_xwr
assert(!(s1_valid_masked && s1_req.cmd === M_PWR) || (s1_mask_xwr | ~io.cpu.s1_data.mask).andR)
val s2_valid = RegNext(s1_valid_masked && !s1_sfence, init=false.B)
val s2_valid_no_xcpt = s2_valid && !io.cpu.s2_xcpt.asUInt.orR
val s2_probe = RegNext(s1_probe, init=false.B)
val releaseInFlight = s1_probe || s2_probe || release_state =/= s_ready
val s2_not_nacked_in_s1 = RegNext(!s1_nack)
val s2_valid_not_nacked_in_s1 = s2_valid && s2_not_nacked_in_s1
val s2_valid_masked = s2_valid_no_xcpt && s2_not_nacked_in_s1
val s2_valid_not_killed = s2_valid_masked && !io.cpu.s2_kill
val s2_req = Reg(chiselTypeOf(io.cpu.req.bits))
val s2_cmd_flush_all = s2_req.cmd === M_FLUSH_ALL && !s2_req.size(0)
val s2_cmd_flush_line = s2_req.cmd === M_FLUSH_ALL && s2_req.size(0)
val s2_tlb_xcpt = Reg(chiselTypeOf(tlb.io.resp))
val s2_pma = Reg(chiselTypeOf(tlb.io.resp))
val s2_uncached_resp_addr = Reg(chiselTypeOf(s2_req.addr)) // should be DCE'd in synthesis
when (s1_valid_not_nacked || s1_flush_valid) {
s2_req := s1_req
s2_req.addr := s1_paddr
s2_tlb_xcpt := tlb.io.resp
s2_pma := Mux(s1_tlb_req_valid, pma_checker.io.resp, tlb.io.resp)
}
val s2_vaddr = Cat(RegEnable(s1_vaddr, s1_valid_not_nacked || s1_flush_valid) >> tagLSB, s2_req.addr(tagLSB-1, 0))
val s2_read = isRead(s2_req.cmd)
val s2_write = isWrite(s2_req.cmd)
val s2_readwrite = s2_read || s2_write
val s2_flush_valid_pre_tag_ecc = RegNext(s1_flush_valid)
val s1_meta_decoded = s1_meta.map(tECC.decode(_))
val s1_meta_clk_en = s1_valid_not_nacked || s1_flush_valid || s1_probe
val s2_meta_correctable_errors = s1_meta_decoded.map(m => RegEnable(m.correctable, s1_meta_clk_en)).asUInt
val s2_meta_uncorrectable_errors = s1_meta_decoded.map(m => RegEnable(m.uncorrectable, s1_meta_clk_en)).asUInt
val s2_meta_error_uncorrectable = s2_meta_uncorrectable_errors.orR
val s2_meta_corrected = s1_meta_decoded.map(m => RegEnable(m.corrected, s1_meta_clk_en).asTypeOf(new L1Metadata))
val s2_meta_error = (s2_meta_uncorrectable_errors | s2_meta_correctable_errors).orR
val s2_flush_valid = s2_flush_valid_pre_tag_ecc && !s2_meta_error
val s2_data = {
val wordsPerRow = rowBits / subWordBits
val en = s1_valid || inWriteback || io.cpu.replay_next
val word_en = Mux(inWriteback, Fill(wordsPerRow, 1.U), Mux(s1_did_read, s1_read_mask, 0.U))
val s1_way_words = s1_all_data_ways.map(_.grouped(dECC.width(eccBits) * (subWordBits / eccBits)))
if (cacheParams.pipelineWayMux) {
val s1_word_en = Mux(io.cpu.replay_next, 0.U, word_en)
(for (i <- 0 until wordsPerRow) yield {
val s2_way_en = RegEnable(Mux(s1_word_en(i), s1_data_way, 0.U), en)
val s2_way_words = (0 until nWays).map(j => RegEnable(s1_way_words(j)(i), en && word_en(i)))
(0 until nWays).map(j => Mux(s2_way_en(j), s2_way_words(j), 0.U)).reduce(_|_)
}).asUInt
} else {
val s1_word_en = Mux(!io.cpu.replay_next, word_en, UIntToOH(uncachedResp.addr.extract(log2Up(rowBits/8)-1, log2Up(wordBytes)), wordsPerRow))
(for (i <- 0 until wordsPerRow) yield {
RegEnable(Mux1H(Mux(s1_word_en(i), s1_data_way, 0.U), s1_way_words.map(_(i))), en)
}).asUInt
}
}
val s2_probe_way = RegEnable(s1_hit_way, s1_probe)
val s2_probe_state = RegEnable(s1_hit_state, s1_probe)
val s2_hit_way = RegEnable(s1_hit_way, s1_valid_not_nacked)
val s2_hit_state = RegEnable(s1_hit_state, s1_valid_not_nacked || s1_flush_valid)
val s2_waw_hazard = RegEnable(s1_waw_hazard, s1_valid_not_nacked)
val s2_store_merge = Wire(Bool())
val s2_hit_valid = s2_hit_state.isValid()
val (s2_hit, s2_grow_param, s2_new_hit_state) = s2_hit_state.onAccess(s2_req.cmd)
val s2_data_decoded = decodeData(s2_data)
val s2_word_idx = s2_req.addr.extract(log2Up(rowBits/8)-1, log2Up(wordBytes))
val s2_data_error = s2_data_decoded.map(_.error).orR
val s2_data_error_uncorrectable = s2_data_decoded.map(_.uncorrectable).orR
val s2_data_corrected = (s2_data_decoded.map(_.corrected): Seq[UInt]).asUInt
val s2_data_uncorrected = (s2_data_decoded.map(_.uncorrected): Seq[UInt]).asUInt
val s2_valid_hit_maybe_flush_pre_data_ecc_and_waw = s2_valid_masked && !s2_meta_error && s2_hit
val s2_no_alloc_hazard = if (!usingVM || pgIdxBits >= untagBits) false.B else {
// make sure that any in-flight non-allocating accesses are ordered before
// any allocating accesses. this can only happen if aliasing is possible.
val any_no_alloc_in_flight = Reg(Bool())
when (!uncachedInFlight.asUInt.orR) { any_no_alloc_in_flight := false.B }
when (s2_valid && s2_req.no_alloc) { any_no_alloc_in_flight := true.B }
val s1_need_check = any_no_alloc_in_flight || s2_valid && s2_req.no_alloc
val concerns = (uncachedInFlight zip uncachedReqs) :+ (s2_valid && s2_req.no_alloc, s2_req)
val s1_uncached_hits = concerns.map { c =>
val concern_wmask = new StoreGen(c._2.size, c._2.addr, 0.U, wordBytes).mask
val addr_match = (c._2.addr ^ s1_paddr)(pgIdxBits+pgLevelBits-1, wordBytes.log2) === 0.U
val mask_match = (concern_wmask & s1_mask_xwr).orR || c._2.cmd === M_PWR || s1_req.cmd === M_PWR
val cmd_match = isWrite(c._2.cmd) || isWrite(s1_req.cmd)
c._1 && s1_need_check && cmd_match && addr_match && mask_match
}
val s2_uncached_hits = RegEnable(s1_uncached_hits.asUInt, s1_valid_not_nacked)
s2_uncached_hits.orR
}
val s2_valid_hit_pre_data_ecc_and_waw = s2_valid_hit_maybe_flush_pre_data_ecc_and_waw && s2_readwrite && !s2_no_alloc_hazard
val s2_valid_flush_line = s2_valid_hit_maybe_flush_pre_data_ecc_and_waw && s2_cmd_flush_line
val s2_valid_hit_pre_data_ecc = s2_valid_hit_pre_data_ecc_and_waw && (!s2_waw_hazard || s2_store_merge)
val s2_valid_data_error = s2_valid_hit_pre_data_ecc_and_waw && s2_data_error
val s2_valid_hit = s2_valid_hit_pre_data_ecc && !s2_data_error
val s2_valid_miss = s2_valid_masked && s2_readwrite && !s2_meta_error && !s2_hit
val s2_uncached = !s2_pma.cacheable || s2_req.no_alloc && !s2_pma.must_alloc && !s2_hit_valid
val s2_valid_cached_miss = s2_valid_miss && !s2_uncached && !uncachedInFlight.asUInt.orR
dontTouch(s2_valid_cached_miss)
val s2_want_victimize = (!usingDataScratchpad).B && (s2_valid_cached_miss || s2_valid_flush_line || s2_valid_data_error || s2_flush_valid)
val s2_cannot_victimize = !s2_flush_valid && io.cpu.s2_kill
val s2_victimize = s2_want_victimize && !s2_cannot_victimize
val s2_valid_uncached_pending = s2_valid_miss && s2_uncached && !uncachedInFlight.asUInt.andR
val s2_victim_way = UIntToOH(RegEnable(s1_victim_way, s1_valid_not_nacked || s1_flush_valid))
val s2_victim_or_hit_way = Mux(s2_hit_valid, s2_hit_way, s2_victim_way)
val s2_victim_tag = Mux(s2_valid_data_error || s2_valid_flush_line, s2_req.addr(paddrBits-1, tagLSB), Mux1H(s2_victim_way, s2_meta_corrected).tag)
val s2_victim_state = Mux(s2_hit_valid, s2_hit_state, Mux1H(s2_victim_way, s2_meta_corrected).coh)
val (s2_prb_ack_data, s2_report_param, probeNewCoh)= s2_probe_state.onProbe(probe_bits.param)
val (s2_victim_dirty, s2_shrink_param, voluntaryNewCoh) = s2_victim_state.onCacheControl(M_FLUSH)
dontTouch(s2_victim_dirty)
val s2_update_meta = s2_hit_state =/= s2_new_hit_state
val s2_dont_nack_uncached = s2_valid_uncached_pending && tl_out_a.ready
val s2_dont_nack_misc = s2_valid_masked && !s2_meta_error &&
(supports_flush.B && s2_cmd_flush_all && flushed && !flushing ||
supports_flush.B && s2_cmd_flush_line && !s2_hit ||
s2_req.cmd === M_WOK)
io.cpu.s2_nack := s2_valid_no_xcpt && !s2_dont_nack_uncached && !s2_dont_nack_misc && !s2_valid_hit
when (io.cpu.s2_nack || (s2_valid_hit_pre_data_ecc_and_waw && s2_update_meta)) { s1_nack := true.B }
// tag updates on ECC errors
val s2_first_meta_corrected = PriorityMux(s2_meta_correctable_errors, s2_meta_corrected)
metaArb.io.in(1).valid := s2_meta_error && (s2_valid_masked || s2_flush_valid_pre_tag_ecc || s2_probe)
metaArb.io.in(1).bits.write := true.B
metaArb.io.in(1).bits.way_en := s2_meta_uncorrectable_errors | Mux(s2_meta_error_uncorrectable, 0.U, PriorityEncoderOH(s2_meta_correctable_errors))
metaArb.io.in(1).bits.idx := Mux(s2_probe, probeIdx(probe_bits), s2_vaddr(idxMSB, idxLSB))
metaArb.io.in(1).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, metaArb.io.in(1).bits.idx << blockOffBits)
metaArb.io.in(1).bits.data := tECC.encode {
val new_meta = WireDefault(s2_first_meta_corrected)
when (s2_meta_error_uncorrectable) { new_meta.coh := ClientMetadata.onReset }
new_meta.asUInt
}
// tag updates on hit
metaArb.io.in(2).valid := s2_valid_hit_pre_data_ecc_and_waw && s2_update_meta
metaArb.io.in(2).bits.write := !io.cpu.s2_kill
metaArb.io.in(2).bits.way_en := s2_victim_or_hit_way
metaArb.io.in(2).bits.idx := s2_vaddr(idxMSB, idxLSB)
metaArb.io.in(2).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, s2_vaddr(idxMSB, 0))
metaArb.io.in(2).bits.data := tECC.encode(L1Metadata(s2_req.addr >> tagLSB, s2_new_hit_state).asUInt)
// load reservations and TL error reporting
val s2_lr = (usingAtomics && !usingDataScratchpad).B && s2_req.cmd === M_XLR
val s2_sc = (usingAtomics && !usingDataScratchpad).B && s2_req.cmd === M_XSC
val lrscCount = RegInit(0.U)
val lrscValid = lrscCount > lrscBackoff.U
val lrscBackingOff = lrscCount > 0.U && !lrscValid
val lrscAddr = Reg(UInt())
val lrscAddrMatch = lrscAddr === (s2_req.addr >> blockOffBits)
val s2_sc_fail = s2_sc && !(lrscValid && lrscAddrMatch)
when ((s2_valid_hit && s2_lr && !cached_grant_wait || s2_valid_cached_miss) && !io.cpu.s2_kill) {
lrscCount := Mux(s2_hit, (lrscCycles - 1).U, 0.U)
lrscAddr := s2_req.addr >> blockOffBits
}
when (lrscCount > 0.U) { lrscCount := lrscCount - 1.U }
when (s2_valid_not_killed && lrscValid) { lrscCount := lrscBackoff.U }
when (s1_probe) { lrscCount := 0.U }
// don't perform data correction if it might clobber a recent store
val s2_correct = s2_data_error && !any_pstore_valid && !RegNext(any_pstore_valid || s2_valid) && usingDataScratchpad.B
// pending store buffer
val s2_valid_correct = s2_valid_hit_pre_data_ecc_and_waw && s2_correct && !io.cpu.s2_kill
def s2_store_valid_pre_kill = s2_valid_hit && s2_write && !s2_sc_fail
def s2_store_valid = s2_store_valid_pre_kill && !io.cpu.s2_kill
val pstore1_cmd = RegEnable(s1_req.cmd, s1_valid_not_nacked && s1_write)
val pstore1_addr = RegEnable(s1_vaddr, s1_valid_not_nacked && s1_write)
val pstore1_data = RegEnable(io.cpu.s1_data.data, s1_valid_not_nacked && s1_write)
val pstore1_way = RegEnable(s1_hit_way, s1_valid_not_nacked && s1_write)
val pstore1_mask = RegEnable(s1_mask, s1_valid_not_nacked && s1_write)
val pstore1_storegen_data = WireDefault(pstore1_data)
val pstore1_rmw = usingRMW.B && RegEnable(needsRead(s1_req), s1_valid_not_nacked && s1_write)
val pstore1_merge_likely = s2_valid_not_nacked_in_s1 && s2_write && s2_store_merge
val pstore1_merge = s2_store_valid && s2_store_merge
val pstore2_valid = RegInit(false.B)
val pstore_drain_opportunistic = !(io.cpu.req.valid && likelyNeedsRead(io.cpu.req.bits)) && !(s1_valid && s1_waw_hazard)
val pstore_drain_on_miss = releaseInFlight || RegNext(io.cpu.s2_nack)
val pstore1_held = RegInit(false.B)
val pstore1_valid_likely = s2_valid && s2_write || pstore1_held
def pstore1_valid_not_rmw(s2_kill: Bool) = s2_valid_hit_pre_data_ecc && s2_write && !s2_kill || pstore1_held
val pstore1_valid = s2_store_valid || pstore1_held
any_pstore_valid := pstore1_held || pstore2_valid
val pstore_drain_structural = pstore1_valid_likely && pstore2_valid && ((s1_valid && s1_write) || pstore1_rmw)
assert(pstore1_rmw || pstore1_valid_not_rmw(io.cpu.s2_kill) === pstore1_valid)
ccover(pstore_drain_structural, "STORE_STRUCTURAL_HAZARD", "D$ read-modify-write structural hazard")
ccover(pstore1_valid && pstore_drain_on_miss, "STORE_DRAIN_ON_MISS", "D$ store buffer drain on miss")
ccover(s1_valid_not_nacked && s1_waw_hazard, "WAW_HAZARD", "D$ write-after-write hazard")
def should_pstore_drain(truly: Bool) = {
val s2_kill = truly && io.cpu.s2_kill
!pstore1_merge_likely &&
(usingRMW.B && pstore_drain_structural ||
(((pstore1_valid_not_rmw(s2_kill) && !pstore1_rmw) || pstore2_valid) && (pstore_drain_opportunistic || pstore_drain_on_miss)))
}
val pstore_drain = should_pstore_drain(true.B)
pstore1_held := (s2_store_valid && !s2_store_merge || pstore1_held) && pstore2_valid && !pstore_drain
val advance_pstore1 = (pstore1_valid || s2_valid_correct) && (pstore2_valid === pstore_drain)
pstore2_valid := pstore2_valid && !pstore_drain || advance_pstore1
val pstore2_addr = RegEnable(Mux(s2_correct, s2_vaddr, pstore1_addr), advance_pstore1)
val pstore2_way = RegEnable(Mux(s2_correct, s2_hit_way, pstore1_way), advance_pstore1)
val pstore2_storegen_data = {
for (i <- 0 until wordBytes)
yield RegEnable(pstore1_storegen_data(8*(i+1)-1, 8*i), advance_pstore1 || pstore1_merge && pstore1_mask(i))
}.asUInt
val pstore2_storegen_mask = {
val mask = Reg(UInt(wordBytes.W))
when (advance_pstore1 || pstore1_merge) {
val mergedMask = pstore1_mask | Mux(pstore1_merge, mask, 0.U)
mask := ~Mux(s2_correct, 0.U, ~mergedMask)
}
mask
}
s2_store_merge := (if (eccBytes == 1) false.B else {
ccover(pstore1_merge, "STORE_MERGED", "D$ store merged")
// only merge stores to ECC granules that are already stored-to, to avoid
// WAW hazards
val wordMatch = (eccMask(pstore2_storegen_mask) | ~eccMask(pstore1_mask)).andR
val idxMatch = s2_vaddr(untagBits-1, log2Ceil(wordBytes)) === pstore2_addr(untagBits-1, log2Ceil(wordBytes))
val tagMatch = (s2_hit_way & pstore2_way).orR
pstore2_valid && wordMatch && idxMatch && tagMatch
})
dataArb.io.in(0).valid := should_pstore_drain(false.B)
dataArb.io.in(0).bits.write := pstore_drain
dataArb.io.in(0).bits.addr := Mux(pstore2_valid, pstore2_addr, pstore1_addr)
dataArb.io.in(0).bits.way_en := Mux(pstore2_valid, pstore2_way, pstore1_way)
dataArb.io.in(0).bits.wdata := encodeData(Fill(rowWords, Mux(pstore2_valid, pstore2_storegen_data, pstore1_data)), false.B)
dataArb.io.in(0).bits.wordMask := {
val eccMask = dataArb.io.in(0).bits.eccMask.asBools.grouped(subWordBytes/eccBytes).map(_.orR).toSeq.asUInt
val wordMask = UIntToOH(Mux(pstore2_valid, pstore2_addr, pstore1_addr).extract(rowOffBits-1, wordBytes.log2))
FillInterleaved(wordBytes/subWordBytes, wordMask) & Fill(rowBytes/wordBytes, eccMask)
}
dataArb.io.in(0).bits.eccMask := eccMask(Mux(pstore2_valid, pstore2_storegen_mask, pstore1_mask))
// store->load RAW hazard detection
def s1Depends(addr: UInt, mask: UInt) =
addr(idxMSB, wordOffBits) === s1_vaddr(idxMSB, wordOffBits) &&
Mux(s1_write, (eccByteMask(mask) & eccByteMask(s1_mask_xwr)).orR, (mask & s1_mask_xwr).orR)
val s1_hazard =
(pstore1_valid_likely && s1Depends(pstore1_addr, pstore1_mask)) ||
(pstore2_valid && s1Depends(pstore2_addr, pstore2_storegen_mask))
val s1_raw_hazard = s1_read && s1_hazard
s1_waw_hazard := (if (eccBytes == 1) false.B else {
ccover(s1_valid_not_nacked && s1_waw_hazard, "WAW_HAZARD", "D$ write-after-write hazard")
s1_write && (s1_hazard || needsRead(s1_req) && !s1_did_read)
})
when (s1_valid && s1_raw_hazard) { s1_nack := true.B }
// performance hints to processor
io.cpu.s2_nack_cause_raw := RegNext(s1_raw_hazard) || !(!s2_waw_hazard || s2_store_merge)
// Prepare a TileLink request message that initiates a transaction
val a_source = PriorityEncoder(~uncachedInFlight.asUInt << mmioOffset) // skip the MSHR
val acquire_address = (s2_req.addr >> idxLSB) << idxLSB
val access_address = s2_req.addr
val a_size = s2_req.size
val a_data = Fill(beatWords, pstore1_data)
val a_mask = pstore1_mask << (access_address.extract(beatBytes.log2-1, wordBytes.log2) << 3)
val get = edge.Get(a_source, access_address, a_size)._2
val put = edge.Put(a_source, access_address, a_size, a_data)._2
val putpartial = edge.Put(a_source, access_address, a_size, a_data, a_mask)._2
val atomics = if (edge.manager.anySupportLogical) {
MuxLookup(s2_req.cmd, WireDefault(0.U.asTypeOf(new TLBundleA(edge.bundle))))(Array(
M_XA_SWAP -> edge.Logical(a_source, access_address, a_size, a_data, TLAtomics.SWAP)._2,
M_XA_XOR -> edge.Logical(a_source, access_address, a_size, a_data, TLAtomics.XOR) ._2,
M_XA_OR -> edge.Logical(a_source, access_address, a_size, a_data, TLAtomics.OR) ._2,
M_XA_AND -> edge.Logical(a_source, access_address, a_size, a_data, TLAtomics.AND) ._2,
M_XA_ADD -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.ADD)._2,
M_XA_MIN -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.MIN)._2,
M_XA_MAX -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.MAX)._2,
M_XA_MINU -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.MINU)._2,
M_XA_MAXU -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.MAXU)._2))
} else {
// If no managers support atomics, assert fail if processor asks for them
assert (!(tl_out_a.valid && s2_read && s2_write && s2_uncached))
WireDefault(new TLBundleA(edge.bundle), DontCare)
}
tl_out_a.valid := !io.cpu.s2_kill &&
(s2_valid_uncached_pending ||
(s2_valid_cached_miss &&
!(release_ack_wait && (s2_req.addr ^ release_ack_addr)(((pgIdxBits + pgLevelBits) min paddrBits) - 1, idxLSB) === 0.U) &&
(cacheParams.acquireBeforeRelease.B && !release_ack_wait && release_queue_empty || !s2_victim_dirty)))
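    // Note: a cached-miss Acquire is held off while a ReleaseAck for the same cache
    // index is still outstanding. If the victim line is dirty, the Acquire may only
    // be issued ahead of the writeback when acquireBeforeRelease is enabled, no
    // ReleaseAck is pending, and the release queue has drained.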
tl_out_a.bits := Mux(!s2_uncached, acquire(s2_vaddr, s2_req.addr, s2_grow_param),
Mux(!s2_write, get,
Mux(s2_req.cmd === M_PWR, putpartial,
Mux(!s2_read, put, atomics))))
// Drive APROT Bits
tl_out_a.bits.user.lift(AMBAProt).foreach { x =>
val user_bit_cacheable = s2_pma.cacheable
x.privileged := s2_req.dprv === PRV.M.U || user_bit_cacheable
// if the address is cacheable, enable outer caches
x.bufferable := user_bit_cacheable
x.modifiable := user_bit_cacheable
x.readalloc := user_bit_cacheable
x.writealloc := user_bit_cacheable
// Following are always tied off
x.fetch := false.B
x.secure := true.B
}
// Set pending bits for outstanding TileLink transaction
val a_sel = UIntToOH(a_source, maxUncachedInFlight+mmioOffset) >> mmioOffset
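    // Illustrative note (assuming maxUncachedInFlight = 2 and mmioOffset = 1): source 0
    // belongs to the cached MSHR, uncached requests use sources 1 and 2, and a_sel
    // shifts the one-hot back down so it indexes uncachedInFlight/uncachedReqs directly.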
when (tl_out_a.fire) {
when (s2_uncached) {
(a_sel.asBools zip (uncachedInFlight zip uncachedReqs)) foreach { case (s, (f, r)) =>
when (s) {
f := true.B
r := s2_req
r.cmd := Mux(s2_write, Mux(s2_req.cmd === M_PWR, M_PWR, M_XWR), M_XRD)
}
}
}.otherwise {
cached_grant_wait := true.B
refill_way := s2_victim_or_hit_way
}
}
// grant
val (d_first, d_last, d_done, d_address_inc) = edge.addr_inc(tl_out.d)
val (d_opc, grantIsUncached, grantIsUncachedData) = {
val uncachedGrantOpcodesSansData = Seq(AccessAck, HintAck)
val uncachedGrantOpcodesWithData = Seq(AccessAckData)
val uncachedGrantOpcodes = uncachedGrantOpcodesWithData ++ uncachedGrantOpcodesSansData
val whole_opc = tl_out.d.bits.opcode
if (usingDataScratchpad) {
assert(!tl_out.d.valid || whole_opc.isOneOf(uncachedGrantOpcodes))
// the only valid TL-D messages are uncached, so we can do some pruning
val opc = whole_opc(uncachedGrantOpcodes.map(_.getWidth).max - 1, 0)
val data = DecodeLogic(opc, uncachedGrantOpcodesWithData, uncachedGrantOpcodesSansData)
(opc, true.B, data)
} else {
(whole_opc, whole_opc.isOneOf(uncachedGrantOpcodes), whole_opc.isOneOf(uncachedGrantOpcodesWithData))
}
}
tl_d_data_encoded := encodeData(tl_out.d.bits.data, tl_out.d.bits.corrupt && !io.ptw.customCSRs.suppressCorruptOnGrantData && !grantIsUncached)
val grantIsCached = d_opc.isOneOf(Grant, GrantData)
val grantIsVoluntary = d_opc === ReleaseAck // Clears a different pending bit
val grantIsRefill = d_opc === GrantData // Writes the data array
val grantInProgress = RegInit(false.B)
val blockProbeAfterGrantCount = RegInit(0.U)
when (blockProbeAfterGrantCount > 0.U) { blockProbeAfterGrantCount := blockProbeAfterGrantCount - 1.U }
val canAcceptCachedGrant = !release_state.isOneOf(s_voluntary_writeback, s_voluntary_write_meta, s_voluntary_release)
tl_out.d.ready := Mux(grantIsCached, (!d_first || tl_out.e.ready) && canAcceptCachedGrant, true.B)
val uncachedRespIdxOH = UIntToOH(tl_out.d.bits.source, maxUncachedInFlight+mmioOffset) >> mmioOffset
uncachedResp := Mux1H(uncachedRespIdxOH, uncachedReqs)
when (tl_out.d.fire) {
when (grantIsCached) {
grantInProgress := true.B
assert(cached_grant_wait, "A GrantData was unexpected by the dcache.")
when(d_last) {
cached_grant_wait := false.B
grantInProgress := false.B
blockProbeAfterGrantCount := (blockProbeAfterGrantCycles - 1).U
replacer.miss
}
} .elsewhen (grantIsUncached) {
(uncachedRespIdxOH.asBools zip uncachedInFlight) foreach { case (s, f) =>
when (s && d_last) {
assert(f, "An AccessAck was unexpected by the dcache.") // TODO must handle Ack coming back on same cycle!
f := false.B
}
}
when (grantIsUncachedData) {
if (!cacheParams.separateUncachedResp) {
if (!cacheParams.pipelineWayMux)
s1_data_way := 1.U << nWays
s2_req.cmd := M_XRD
s2_req.size := uncachedResp.size
s2_req.signed := uncachedResp.signed
s2_req.tag := uncachedResp.tag
s2_req.addr := {
require(rowOffBits >= beatOffBits)
val dontCareBits = s1_paddr >> rowOffBits << rowOffBits
dontCareBits | uncachedResp.addr(beatOffBits-1, 0)
}
s2_uncached_resp_addr := uncachedResp.addr
}
}
} .elsewhen (grantIsVoluntary) {
assert(release_ack_wait, "A ReleaseAck was unexpected by the dcache.") // TODO should handle Ack coming back on same cycle!
release_ack_wait := false.B
}
}
// Finish TileLink transaction by issuing a GrantAck
tl_out.e.valid := tl_out.d.valid && d_first && grantIsCached && canAcceptCachedGrant
tl_out.e.bits := edge.GrantAck(tl_out.d.bits)
assert(tl_out.e.fire === (tl_out.d.fire && d_first && grantIsCached))
// data refill
// note this ready-valid signaling ignores E-channel backpressure, which
// benignly means the data RAM might occasionally be redundantly written
dataArb.io.in(1).valid := tl_out.d.valid && grantIsRefill && canAcceptCachedGrant
when (grantIsRefill && !dataArb.io.in(1).ready) {
tl_out.e.valid := false.B
tl_out.d.ready := false.B
}
if (!usingDataScratchpad) {
dataArb.io.in(1).bits.write := true.B
dataArb.io.in(1).bits.addr := (s2_vaddr >> idxLSB) << idxLSB | d_address_inc
dataArb.io.in(1).bits.way_en := refill_way
dataArb.io.in(1).bits.wdata := tl_d_data_encoded
dataArb.io.in(1).bits.wordMask := ~0.U((rowBytes / subWordBytes).W)
dataArb.io.in(1).bits.eccMask := ~0.U((wordBytes / eccBytes).W)
} else {
dataArb.io.in(1).bits := dataArb.io.in(0).bits
}
// tag updates on refill
// ignore backpressure from metaArb, which can only be caused by tag ECC
// errors on hit-under-miss. failing to write the new tag will leave the
// line invalid, so we'll simply request the line again later.
metaArb.io.in(3).valid := grantIsCached && d_done && !tl_out.d.bits.denied
metaArb.io.in(3).bits.write := true.B
metaArb.io.in(3).bits.way_en := refill_way
metaArb.io.in(3).bits.idx := s2_vaddr(idxMSB, idxLSB)
metaArb.io.in(3).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, s2_vaddr(idxMSB, 0))
metaArb.io.in(3).bits.data := tECC.encode(L1Metadata(s2_req.addr >> tagLSB, s2_hit_state.onGrant(s2_req.cmd, tl_out.d.bits.param)).asUInt)
if (!cacheParams.separateUncachedResp) {
// don't accept uncached grants if there's a structural hazard on s2_data...
val blockUncachedGrant = Reg(Bool())
blockUncachedGrant := dataArb.io.out.valid
when (grantIsUncachedData && (blockUncachedGrant || s1_valid)) {
tl_out.d.ready := false.B
// ...but insert bubble to guarantee grant's eventual forward progress
when (tl_out.d.valid) {
io.cpu.req.ready := false.B
dataArb.io.in(1).valid := true.B
dataArb.io.in(1).bits.write := false.B
blockUncachedGrant := !dataArb.io.in(1).ready
}
}
}
ccover(tl_out.d.valid && !tl_out.d.ready, "BLOCK_D", "D$ D-channel blocked")
// Handle an incoming TileLink Probe message
val block_probe_for_core_progress = blockProbeAfterGrantCount > 0.U || lrscValid
val block_probe_for_pending_release_ack = release_ack_wait && (tl_out.b.bits.address ^ release_ack_addr)(((pgIdxBits + pgLevelBits) min paddrBits) - 1, idxLSB) === 0.U
val block_probe_for_ordering = releaseInFlight || block_probe_for_pending_release_ack || grantInProgress
metaArb.io.in(6).valid := tl_out.b.valid && (!block_probe_for_core_progress || lrscBackingOff)
tl_out.b.ready := metaArb.io.in(6).ready && !(block_probe_for_core_progress || block_probe_for_ordering || s1_valid || s2_valid)
metaArb.io.in(6).bits.write := false.B
metaArb.io.in(6).bits.idx := probeIdx(tl_out.b.bits)
metaArb.io.in(6).bits.addr := Cat(io.cpu.req.bits.addr >> paddrBits, tl_out.b.bits.address)
metaArb.io.in(6).bits.way_en := metaArb.io.in(4).bits.way_en
metaArb.io.in(6).bits.data := metaArb.io.in(4).bits.data
// replacement policy
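  // for per-set policies, replacement state lives in a small per-set memory:
  // read in stage 1, updated on stage-2 hits, and cleared while resetting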
s1_victim_way := (if (replacer.perSet && nWays > 1) {
val repl_array = Mem(nSets, UInt(replacer.nBits.W))
val s1_repl_idx = s1_req.addr(idxBits+blockOffBits-1, blockOffBits)
val s2_repl_idx = s2_vaddr(idxBits+blockOffBits-1, blockOffBits)
val s2_repl_state = Reg(UInt(replacer.nBits.W))
val s2_new_repl_state = replacer.get_next_state(s2_repl_state, OHToUInt(s2_hit_way))
val s2_repl_wen = s2_valid_masked && s2_hit_way.orR && s2_repl_state =/= s2_new_repl_state
val s1_repl_state = Mux(s2_repl_wen && s2_repl_idx === s1_repl_idx, s2_new_repl_state, repl_array(s1_repl_idx))
when (s1_valid_not_nacked) { s2_repl_state := s1_repl_state }
val waddr = Mux(resetting, flushCounter(idxBits-1, 0), s2_repl_idx)
val wdata = Mux(resetting, 0.U, s2_new_repl_state)
val wen = resetting || s2_repl_wen
when (wen) { repl_array(waddr) := wdata }
replacer.get_replace_way(s1_repl_state)
} else {
replacer.way
})
// release
val (c_first, c_last, releaseDone, c_count) = edge.count(tl_out_c)
val releaseRejected = Wire(Bool())
val s1_release_data_valid = RegNext(dataArb.io.in(2).fire)
val s2_release_data_valid = RegNext(s1_release_data_valid && !releaseRejected)
releaseRejected := s2_release_data_valid && !tl_out_c.fire
val releaseDataBeat = Cat(0.U, c_count) + Mux(releaseRejected, 0.U, s1_release_data_valid + Cat(0.U, s2_release_data_valid))
val nackResponseMessage = edge.ProbeAck(b = probe_bits, reportPermissions = TLPermissions.NtoN)
val cleanReleaseMessage = edge.ProbeAck(b = probe_bits, reportPermissions = s2_report_param)
val dirtyReleaseMessage = edge.ProbeAck(b = probe_bits, reportPermissions = s2_report_param, data = 0.U)
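  // default the C channel to a NtoN ProbeAck (nack); the probe and voluntary
  // release logic below overrides bits/valid as needed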
tl_out_c.valid := (s2_release_data_valid || (!cacheParams.silentDrop.B && release_state === s_voluntary_release)) && !(c_first && release_ack_wait)
tl_out_c.bits := nackResponseMessage
val newCoh = WireDefault(probeNewCoh)
releaseWay := s2_probe_way
if (!usingDataScratchpad) {
when (s2_victimize) {
assert(s2_valid_flush_line || s2_flush_valid || io.cpu.s2_nack)
val discard_line = s2_valid_flush_line && s2_req.size(1) || s2_flush_valid && flushing_req.size(1)
release_state := Mux(s2_victim_dirty && !discard_line, s_voluntary_writeback,
Mux(!cacheParams.silentDrop.B && !release_ack_wait && release_queue_empty && s2_victim_state.isValid() && (s2_valid_flush_line || s2_flush_valid || s2_readwrite && !s2_hit_valid), s_voluntary_release,
s_voluntary_write_meta))
probe_bits := addressToProbe(s2_vaddr, Cat(s2_victim_tag, s2_req.addr(tagLSB-1, idxLSB)) << idxLSB)
}
when (s2_probe) {
val probeNack = WireDefault(true.B)
when (s2_meta_error) {
release_state := s_probe_retry
}.elsewhen (s2_prb_ack_data) {
release_state := s_probe_rep_dirty
}.elsewhen (s2_probe_state.isValid()) {
tl_out_c.valid := true.B
tl_out_c.bits := cleanReleaseMessage
release_state := Mux(releaseDone, s_probe_write_meta, s_probe_rep_clean)
}.otherwise {
tl_out_c.valid := true.B
probeNack := !releaseDone
release_state := Mux(releaseDone, s_ready, s_probe_rep_miss)
}
when (probeNack) { s1_nack := true.B }
}
when (release_state === s_probe_retry) {
metaArb.io.in(6).valid := true.B
metaArb.io.in(6).bits.idx := probeIdx(probe_bits)
metaArb.io.in(6).bits.addr := Cat(io.cpu.req.bits.addr >> paddrBits, probe_bits.address)
when (metaArb.io.in(6).ready) {
release_state := s_ready
s1_probe := true.B
}
}
when (release_state === s_probe_rep_miss) {
tl_out_c.valid := true.B
when (releaseDone) { release_state := s_ready }
}
when (release_state === s_probe_rep_clean) {
tl_out_c.valid := true.B
tl_out_c.bits := cleanReleaseMessage
when (releaseDone) { release_state := s_probe_write_meta }
}
when (release_state === s_probe_rep_dirty) {
tl_out_c.bits := dirtyReleaseMessage
when (releaseDone) { release_state := s_probe_write_meta }
}
when (release_state.isOneOf(s_voluntary_writeback, s_voluntary_write_meta, s_voluntary_release)) {
when (release_state === s_voluntary_release) {
tl_out_c.bits := edge.Release(fromSource = 0.U,
toAddress = 0.U,
lgSize = lgCacheBlockBytes.U,
shrinkPermissions = s2_shrink_param)._2
}.otherwise {
tl_out_c.bits := edge.Release(fromSource = 0.U,
toAddress = 0.U,
lgSize = lgCacheBlockBytes.U,
shrinkPermissions = s2_shrink_param,
data = 0.U)._2
}
newCoh := voluntaryNewCoh
releaseWay := s2_victim_or_hit_way
when (releaseDone) { release_state := s_voluntary_write_meta }
when (tl_out_c.fire && c_first) {
release_ack_wait := true.B
release_ack_addr := probe_bits.address
}
}
tl_out_c.bits.source := probe_bits.source
tl_out_c.bits.address := probe_bits.address
tl_out_c.bits.data := s2_data_corrected
tl_out_c.bits.corrupt := inWriteback && s2_data_error_uncorrectable
}
tl_out_c.bits.user.lift(AMBAProt).foreach { x =>
x.fetch := false.B
x.secure := true.B
x.privileged := true.B
x.bufferable := true.B
x.modifiable := true.B
x.readalloc := true.B
x.writealloc := true.B
}
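  // data arbiter port 2 reads out the victim/probe line for writeback, one
  // row per release data beat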
dataArb.io.in(2).valid := inWriteback && releaseDataBeat < refillCycles.U
dataArb.io.in(2).bits := dataArb.io.in(1).bits
dataArb.io.in(2).bits.write := false.B
dataArb.io.in(2).bits.addr := (probeIdx(probe_bits) << blockOffBits) | (releaseDataBeat(log2Up(refillCycles)-1,0) << rowOffBits)
dataArb.io.in(2).bits.wordMask := ~0.U((rowBytes / subWordBytes).W)
dataArb.io.in(2).bits.eccMask := ~0.U((wordBytes / eccBytes).W)
dataArb.io.in(2).bits.way_en := ~0.U(nWays.W)
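  // metadata arbiter port 4 writes the updated coherence state once a probe
  // or voluntary release reaches its write-meta state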
metaArb.io.in(4).valid := release_state.isOneOf(s_voluntary_write_meta, s_probe_write_meta)
metaArb.io.in(4).bits.write := true.B
metaArb.io.in(4).bits.way_en := releaseWay
metaArb.io.in(4).bits.idx := probeIdx(probe_bits)
metaArb.io.in(4).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, probe_bits.address(idxMSB, 0))
metaArb.io.in(4).bits.data := tECC.encode(L1Metadata(tl_out_c.bits.address >> tagLSB, newCoh).asUInt)
when (metaArb.io.in(4).fire) { release_state := s_ready }
// cached response
(io.cpu.resp.bits: Data).waiveAll :<>= (s2_req: Data).waiveAll
io.cpu.resp.bits.has_data := s2_read
io.cpu.resp.bits.replay := false.B
io.cpu.s2_uncached := s2_uncached && !s2_hit
io.cpu.s2_paddr := s2_req.addr
io.cpu.s2_gpa := s2_tlb_xcpt.gpa
io.cpu.s2_gpa_is_pte := s2_tlb_xcpt.gpa_is_pte
// report whether there are any outstanding accesses. disregard any
// slave-port accesses, since they don't affect local memory ordering.
val s1_isSlavePortAccess = s1_req.no_xcpt
val s2_isSlavePortAccess = s2_req.no_xcpt
io.cpu.ordered := !(s1_valid && !s1_isSlavePortAccess || s2_valid && !s2_isSlavePortAccess || cached_grant_wait || uncachedInFlight.asUInt.orR)
io.cpu.store_pending := (cached_grant_wait && isWrite(s2_req.cmd)) || uncachedInFlight.asUInt.orR
val s1_xcpt_valid = tlb.io.req.valid && !s1_isSlavePortAccess && !s1_nack
io.cpu.s2_xcpt := Mux(RegNext(s1_xcpt_valid), s2_tlb_xcpt, 0.U.asTypeOf(s2_tlb_xcpt))
if (usingDataScratchpad) {
assert(!(s2_valid_masked && s2_req.cmd.isOneOf(M_XLR, M_XSC)))
} else {
ccover(tl_out.b.valid && !tl_out.b.ready, "BLOCK_B", "D$ B-channel blocked")
}
// uncached response
val s1_uncached_data_word = {
val word_idx = uncachedResp.addr.extract(log2Up(rowBits/8)-1, log2Up(wordBytes))
val words = tl_out.d.bits.data.grouped(wordBits)
words(word_idx)
}
val s2_uncached_data_word = RegEnable(s1_uncached_data_word, io.cpu.replay_next)
val doUncachedResp = RegNext(io.cpu.replay_next)
io.cpu.resp.valid := (s2_valid_hit_pre_data_ecc || doUncachedResp) && !s2_data_error
io.cpu.replay_next := tl_out.d.fire && grantIsUncachedData && !cacheParams.separateUncachedResp.B
when (doUncachedResp) {
assert(!s2_valid_hit)
io.cpu.resp.bits.replay := true.B
io.cpu.resp.bits.addr := s2_uncached_resp_addr
}
io.cpu.uncached_resp.map { resp =>
resp.valid := tl_out.d.valid && grantIsUncachedData
resp.bits.tag := uncachedResp.tag
resp.bits.size := uncachedResp.size
resp.bits.signed := uncachedResp.signed
resp.bits.data := new LoadGen(uncachedResp.size, uncachedResp.signed, uncachedResp.addr, s1_uncached_data_word, false.B, wordBytes).data
resp.bits.data_raw := s1_uncached_data_word
when (grantIsUncachedData && !resp.ready) {
tl_out.d.ready := false.B
}
}
// load data subword mux/sign extension
val s2_data_word = (0 until rowBits by wordBits).map(i => s2_data_uncorrected(wordBits+i-1,i)).reduce(_|_)
val s2_data_word_corrected = (0 until rowBits by wordBits).map(i => s2_data_corrected(wordBits+i-1,i)).reduce(_|_)
val s2_data_word_possibly_uncached = Mux(cacheParams.pipelineWayMux.B && doUncachedResp, s2_uncached_data_word, 0.U) | s2_data_word
val loadgen = new LoadGen(s2_req.size, s2_req.signed, s2_req.addr, s2_data_word_possibly_uncached, s2_sc, wordBytes)
io.cpu.resp.bits.data := loadgen.data | s2_sc_fail
io.cpu.resp.bits.data_word_bypass := loadgen.wordData
io.cpu.resp.bits.data_raw := s2_data_word
io.cpu.resp.bits.store_data := pstore1_data
// AMOs
if (usingRMW) {
val amoalus = (0 until coreDataBits / xLen).map { i =>
val amoalu = Module(new AMOALU(xLen))
amoalu.io.mask := pstore1_mask >> (i * xBytes)
amoalu.io.cmd := (if (usingAtomicsInCache) pstore1_cmd else M_XWR)
amoalu.io.lhs := s2_data_word >> (i * xLen)
amoalu.io.rhs := pstore1_data >> (i * xLen)
amoalu
}
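    // for the data scratchpad, merge AMOALU results with ECC-corrected read
    // data under the store mask; otherwise take the masked AMOALU outputs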
pstore1_storegen_data := (if (!usingDataScratchpad) amoalus.map(_.io.out).asUInt else {
val mask = FillInterleaved(8, Mux(s2_correct, 0.U, pstore1_mask))
amoalus.map(_.io.out_unmasked).asUInt & mask | s2_data_word_corrected & ~mask
})
} else if (!usingAtomics) {
assert(!(s1_valid_masked && s1_read && s1_write), "unsupported D$ operation")
}
if (coreParams.useVector) {
edge.manager.managers.foreach { m =>
// Statically ensure that no-allocate accesses are permitted.
// We could consider turning some of these into dynamic PMA checks.
require(!m.supportsAcquireB || m.supportsGet, "With a vector unit, cacheable memory must support Get")
require(!m.supportsAcquireT || m.supportsPutPartial, "With a vector unit, cacheable memory must support PutPartial")
}
}
// flushes
if (!usingDataScratchpad)
when (RegNext(reset.asBool)) { resetting := true.B }
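  // the flush counter indexes sets in its low bits and ways above them;
  // flushDone fires after the last way of the last set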
val flushCounterNext = flushCounter +& 1.U
val flushDone = (flushCounterNext >> log2Ceil(nSets)) === nWays.U
val flushCounterWrap = flushCounterNext(log2Ceil(nSets)-1, 0)
ccover(s2_valid_masked && s2_cmd_flush_all && s2_meta_error, "TAG_ECC_ERROR_DURING_FENCE_I", "D$ ECC error in tag array during cache flush")
ccover(s2_valid_masked && s2_cmd_flush_all && s2_data_error, "DATA_ECC_ERROR_DURING_FENCE_I", "D$ ECC error in data array during cache flush")
s1_flush_valid := metaArb.io.in(5).fire && !s1_flush_valid && !s2_flush_valid_pre_tag_ecc && release_state === s_ready && !release_ack_wait
metaArb.io.in(5).valid := flushing && !flushed
metaArb.io.in(5).bits.write := false.B
metaArb.io.in(5).bits.idx := flushCounter(idxBits-1, 0)
metaArb.io.in(5).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, metaArb.io.in(5).bits.idx << blockOffBits)
metaArb.io.in(5).bits.way_en := metaArb.io.in(4).bits.way_en
metaArb.io.in(5).bits.data := metaArb.io.in(4).bits.data
// Only flush D$ on FENCE.I if some cached executable regions are untracked.
if (supports_flush) {
when (s2_valid_masked && s2_cmd_flush_all) {
when (!flushed && !io.cpu.s2_kill && !release_ack_wait && !uncachedInFlight.asUInt.orR) {
flushing := true.B
flushing_req := s2_req
}
}
when (tl_out_a.fire && !s2_uncached) { flushed := false.B }
when (flushing) {
s1_victim_way := flushCounter >> log2Up(nSets)
when (s2_flush_valid) {
flushCounter := flushCounterNext
when (flushDone) {
flushed := true.B
if (!isPow2(nWays)) flushCounter := flushCounterWrap
}
}
when (flushed && release_state === s_ready && !release_ack_wait) {
flushing := false.B
}
}
}
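  // metadata arbiter port 0 invalidates every set and way after reset,
  // reusing the flush counter to walk the tag array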
metaArb.io.in(0).valid := resetting
metaArb.io.in(0).bits := metaArb.io.in(5).bits
metaArb.io.in(0).bits.write := true.B
metaArb.io.in(0).bits.way_en := ~0.U(nWays.W)
metaArb.io.in(0).bits.data := tECC.encode(L1Metadata(0.U, ClientMetadata.onReset).asUInt)
when (resetting) {
flushCounter := flushCounterNext
when (flushDone) {
resetting := false.B
if (!isPow2(nWays)) flushCounter := flushCounterWrap
}
}
// gate the clock
clock_en_reg := !cacheParams.clockGate.B ||
io.ptw.customCSRs.disableDCacheClockGate ||
io.cpu.keep_clock_enabled ||
metaArb.io.out.valid || // subsumes resetting || flushing
s1_probe || s2_probe ||
s1_valid || s2_valid ||
io.tlb_port.req.valid ||
s1_tlb_req_valid || s2_tlb_req_valid ||
pstore1_held || pstore2_valid ||
release_state =/= s_ready ||
release_ack_wait || !release_queue_empty ||
!tlb.io.req.ready ||
cached_grant_wait || uncachedInFlight.asUInt.orR ||
lrscCount > 0.U || blockProbeAfterGrantCount > 0.U
// performance events
io.cpu.perf.acquire := edge.done(tl_out_a)
io.cpu.perf.release := edge.done(tl_out_c)
io.cpu.perf.grant := tl_out.d.valid && d_last
io.cpu.perf.tlbMiss := io.ptw.req.fire
io.cpu.perf.storeBufferEmptyAfterLoad := !(
(s1_valid && s1_write) ||
((s2_valid && s2_write && !s2_waw_hazard) || pstore1_held) ||
pstore2_valid)
io.cpu.perf.storeBufferEmptyAfterStore := !(
(s1_valid && s1_write) ||
(s2_valid && s2_write && pstore1_rmw) ||
((s2_valid && s2_write && !s2_waw_hazard || pstore1_held) && pstore2_valid))
io.cpu.perf.canAcceptStoreThenLoad := !(
((s2_valid && s2_write && pstore1_rmw) && (s1_valid && s1_write && !s1_waw_hazard)) ||
(pstore2_valid && pstore1_valid_likely && (s1_valid && s1_write)))
io.cpu.perf.canAcceptStoreThenRMW := io.cpu.perf.canAcceptStoreThenLoad && !pstore2_valid
io.cpu.perf.canAcceptLoadThenLoad := !((s1_valid && s1_write && needsRead(s1_req)) && ((s2_valid && s2_write && !s2_waw_hazard || pstore1_held) || pstore2_valid))
io.cpu.perf.blocked := {
// stop reporting blocked just before unblocking to avoid overly conservative stalling
val beatsBeforeEnd = outer.crossing match {
case SynchronousCrossing(_) => 2
case RationalCrossing(_) => 1 // assumes 1 < ratio <= 2; need more bookkeeping for optimal handling of >2
case _: AsynchronousCrossing => 1 // likewise
case _: CreditedCrossing => 1 // likewise
}
val near_end_of_refill = if (cacheBlockBytes / beatBytes <= beatsBeforeEnd) tl_out.d.valid else {
val refill_count = RegInit(0.U((cacheBlockBytes / beatBytes).log2.W))
when (tl_out.d.fire && grantIsRefill) { refill_count := refill_count + 1.U }
refill_count >= (cacheBlockBytes / beatBytes - beatsBeforeEnd).U
}
cached_grant_wait && !near_end_of_refill
}
// report errors
val (data_error, data_error_uncorrectable, data_error_addr) =
if (usingDataScratchpad) (s2_valid_data_error, s2_data_error_uncorrectable, s2_req.addr) else {
(RegNext(tl_out_c.fire && inWriteback && s2_data_error),
RegNext(s2_data_error_uncorrectable),
probe_bits.address) // This is stable for a cycle after tl_out_c.fire, so don't need a register
}
{
val error_addr =
Mux(metaArb.io.in(1).valid, Cat(s2_first_meta_corrected.tag, metaArb.io.in(1).bits.addr(tagLSB-1, idxLSB)),
data_error_addr >> idxLSB) << idxLSB
io.errors.uncorrectable.foreach { u =>
u.valid := metaArb.io.in(1).valid && s2_meta_error_uncorrectable || data_error && data_error_uncorrectable
u.bits := error_addr
}
io.errors.correctable.foreach { c =>
c.valid := metaArb.io.in(1).valid || data_error
c.bits := error_addr
io.errors.uncorrectable.foreach { u => when (u.valid) { c.valid := false.B } }
}
io.errors.bus.valid := tl_out.d.fire && (tl_out.d.bits.denied || tl_out.d.bits.corrupt)
io.errors.bus.bits := Mux(grantIsCached, s2_req.addr >> idxLSB << idxLSB, 0.U)
ccoverNotScratchpad(io.errors.bus.valid && grantIsCached, "D_ERROR_CACHED", "D$ D-channel error, cached")
ccover(io.errors.bus.valid && !grantIsCached, "D_ERROR_UNCACHED", "D$ D-channel error, uncached")
}
if (usingDataScratchpad) {
val data_error_cover = Seq(
property.CoverBoolean(!data_error, Seq("no_data_error")),
property.CoverBoolean(data_error && !data_error_uncorrectable, Seq("data_correctable_error")),
property.CoverBoolean(data_error && data_error_uncorrectable, Seq("data_uncorrectable_error")))
val request_source = Seq(
property.CoverBoolean(s2_isSlavePortAccess, Seq("from_TL")),
property.CoverBoolean(!s2_isSlavePortAccess, Seq("from_CPU")))
property.cover(new property.CrossProperty(
Seq(data_error_cover, request_source),
Seq(),
"MemorySystem;;Scratchpad Memory Bit Flip Cross Covers"))
} else {
val data_error_type = Seq(
property.CoverBoolean(!s2_valid_data_error, Seq("no_data_error")),
property.CoverBoolean(s2_valid_data_error && !s2_data_error_uncorrectable, Seq("data_correctable_error")),
property.CoverBoolean(s2_valid_data_error && s2_data_error_uncorrectable, Seq("data_uncorrectable_error")))
val data_error_dirty = Seq(
property.CoverBoolean(!s2_victim_dirty, Seq("data_clean")),
property.CoverBoolean(s2_victim_dirty, Seq("data_dirty")))
val request_source = if (supports_flush) {
Seq(
property.CoverBoolean(!flushing, Seq("access")),
property.CoverBoolean(flushing, Seq("during_flush")))
} else {
Seq(property.CoverBoolean(true.B, Seq("never_flush")))
}
val tag_error_cover = Seq(
property.CoverBoolean( !s2_meta_error, Seq("no_tag_error")),
property.CoverBoolean( s2_meta_error && !s2_meta_error_uncorrectable, Seq("tag_correctable_error")),
property.CoverBoolean( s2_meta_error && s2_meta_error_uncorrectable, Seq("tag_uncorrectable_error")))
property.cover(new property.CrossProperty(
Seq(data_error_type, data_error_dirty, request_source, tag_error_cover),
Seq(),
"MemorySystem;;Cache Memory Bit Flip Cross Covers"))
}
} // leaving gated-clock domain
val dcacheImpl = withClock (gated_clock) { new DCacheModuleImpl }
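  // ECC helpers: data is encoded/decoded in eccBits-wide groups, and byte
  // masks are coarsened to ECC-granule masks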
def encodeData(x: UInt, poison: Bool) = x.grouped(eccBits).map(dECC.encode(_, if (dECC.canDetect) poison else false.B)).asUInt
def dummyEncodeData(x: UInt) = x.grouped(eccBits).map(dECC.swizzle(_)).asUInt
def decodeData(x: UInt) = x.grouped(dECC.width(eccBits)).map(dECC.decode(_))
def eccMask(byteMask: UInt) = byteMask.grouped(eccBytes).map(_.orR).asUInt
def eccByteMask(byteMask: UInt) = FillInterleaved(eccBytes, eccMask(byteMask))
def likelyNeedsRead(req: HellaCacheReq) = {
val res = !req.cmd.isOneOf(M_XWR, M_PFW) || req.size < log2Ceil(eccBytes).U
assert(!needsRead(req) || res)
res
}
def needsRead(req: HellaCacheReq) =
isRead(req.cmd) ||
(isWrite(req.cmd) && (req.cmd === M_PWR || req.size < log2Ceil(eccBytes).U))
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
property.cover(cond, s"DCACHE_$label", "MemorySystem;;" + desc)
def ccoverNotScratchpad(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
if (!usingDataScratchpad) ccover(cond, label, desc)
require(!usingVM || tagLSB <= pgIdxBits, s"D$$ set size must not exceed ${1<<(pgIdxBits-10)} KiB; got ${(nSets * cacheBlockBytes)>>10} KiB")
def tagLSB: Int = untagBits
def probeIdx(b: TLBundleB): UInt = b.address(idxMSB, idxLSB)
def addressToProbe(vaddr: UInt, paddr: UInt): TLBundleB = {
val res = Wire(new TLBundleB(edge.bundle))
res :#= DontCare
res.address := paddr
res.source := (mmioOffset - 1).U
res
}
def acquire(vaddr: UInt, paddr: UInt, param: UInt): TLBundleA = {
if (!edge.manager.anySupportAcquireB) WireDefault(0.U.asTypeOf(new TLBundleA(edge.bundle)))
else edge.AcquireBlock(0.U, paddr >> lgCacheBlockBytes << lgCacheBlockBytes, lgCacheBlockBytes.U, param)._2
}
}
File DescribedSRAM.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3.{Data, SyncReadMem, Vec}
import chisel3.util.log2Ceil
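// Wraps SyncReadMem construction and records an SRAM annotation (name,
// geometry, description, write-mask granularity) for downstream memory tools.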
object DescribedSRAM {
def apply[T <: Data](
name: String,
desc: String,
size: BigInt, // depth
data: T
): SyncReadMem[T] = {
val mem = SyncReadMem(size, data)
mem.suggestName(name)
val granWidth = data match {
case v: Vec[_] => v.head.getWidth
case d => d.getWidth
}
val uid = 0
Annotated.srams(
component = mem,
name = name,
address_width = log2Ceil(size),
data_width = data.getWidth,
depth = size,
description = desc,
write_mask_granularity = granWidth
)
mem
}
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
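  // Tracks the beat position within a multi-beat message, returning
  // (first, last, done, count) from a per-channel beat counter.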
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module DCache( // @[DCache.scala:101:7]
input clock, // @[DCache.scala:101:7]
input reset, // @[DCache.scala:101:7]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_b_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_b_valid, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_b_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_b_bits_size, // @[LazyModuleImp.scala:107:25]
input auto_out_b_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_out_b_bits_address, // @[LazyModuleImp.scala:107:25]
input auto_out_c_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_c_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_c_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_c_bits_size, // @[LazyModuleImp.scala:107:25]
output auto_out_c_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_c_bits_address, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_c_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_e_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_e_valid, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_out_e_bits_sink, // @[LazyModuleImp.scala:107:25]
output io_cpu_req_ready, // @[HellaCache.scala:243:14]
input io_cpu_req_valid, // @[HellaCache.scala:243:14]
input [39:0] io_cpu_req_bits_addr, // @[HellaCache.scala:243:14]
input [6:0] io_cpu_req_bits_tag, // @[HellaCache.scala:243:14]
input [4:0] io_cpu_req_bits_cmd, // @[HellaCache.scala:243:14]
input [1:0] io_cpu_req_bits_size, // @[HellaCache.scala:243:14]
input io_cpu_req_bits_signed, // @[HellaCache.scala:243:14]
input [1:0] io_cpu_req_bits_dprv, // @[HellaCache.scala:243:14]
input io_cpu_req_bits_dv, // @[HellaCache.scala:243:14]
input io_cpu_req_bits_phys, // @[HellaCache.scala:243:14]
input io_cpu_s1_kill, // @[HellaCache.scala:243:14]
input [63:0] io_cpu_s1_data_data, // @[HellaCache.scala:243:14]
input [7:0] io_cpu_s1_data_mask, // @[HellaCache.scala:243:14]
output io_cpu_s2_nack, // @[HellaCache.scala:243:14]
output io_cpu_resp_valid, // @[HellaCache.scala:243:14]
output [39:0] io_cpu_resp_bits_addr, // @[HellaCache.scala:243:14]
output [6:0] io_cpu_resp_bits_tag, // @[HellaCache.scala:243:14]
output [4:0] io_cpu_resp_bits_cmd, // @[HellaCache.scala:243:14]
output [1:0] io_cpu_resp_bits_size, // @[HellaCache.scala:243:14]
output io_cpu_resp_bits_signed, // @[HellaCache.scala:243:14]
output [1:0] io_cpu_resp_bits_dprv, // @[HellaCache.scala:243:14]
output io_cpu_resp_bits_dv, // @[HellaCache.scala:243:14]
output [63:0] io_cpu_resp_bits_data, // @[HellaCache.scala:243:14]
output [7:0] io_cpu_resp_bits_mask, // @[HellaCache.scala:243:14]
output io_cpu_resp_bits_replay, // @[HellaCache.scala:243:14]
output io_cpu_resp_bits_has_data, // @[HellaCache.scala:243:14]
output [63:0] io_cpu_resp_bits_data_word_bypass, // @[HellaCache.scala:243:14]
output [63:0] io_cpu_resp_bits_data_raw, // @[HellaCache.scala:243:14]
output [63:0] io_cpu_resp_bits_store_data, // @[HellaCache.scala:243:14]
output io_cpu_replay_next, // @[HellaCache.scala:243:14]
output io_cpu_s2_xcpt_ma_ld, // @[HellaCache.scala:243:14]
output io_cpu_s2_xcpt_ma_st, // @[HellaCache.scala:243:14]
output io_cpu_s2_xcpt_pf_ld, // @[HellaCache.scala:243:14]
output io_cpu_s2_xcpt_pf_st, // @[HellaCache.scala:243:14]
output io_cpu_s2_xcpt_ae_ld, // @[HellaCache.scala:243:14]
output io_cpu_s2_xcpt_ae_st, // @[HellaCache.scala:243:14]
output io_cpu_ordered, // @[HellaCache.scala:243:14]
output io_cpu_perf_release, // @[HellaCache.scala:243:14]
output io_cpu_perf_grant, // @[HellaCache.scala:243:14]
input io_ptw_req_ready, // @[HellaCache.scala:243:14]
output io_ptw_req_valid, // @[HellaCache.scala:243:14]
output [26:0] io_ptw_req_bits_bits_addr, // @[HellaCache.scala:243:14]
output io_ptw_req_bits_bits_need_gpa, // @[HellaCache.scala:243:14]
input io_ptw_resp_valid, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_ae_ptw, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_ae_final, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_pf, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_gf, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_hr, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_hw, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_hx, // @[HellaCache.scala:243:14]
input [43:0] io_ptw_resp_bits_pte_ppn, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_pte_d, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_pte_a, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_pte_g, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_pte_u, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_pte_x, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_pte_w, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_pte_r, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_pte_v, // @[HellaCache.scala:243:14]
input [1:0] io_ptw_resp_bits_level, // @[HellaCache.scala:243:14]
input io_ptw_resp_bits_homogeneous, // @[HellaCache.scala:243:14]
input [3:0] io_ptw_ptbr_mode, // @[HellaCache.scala:243:14]
input io_ptw_status_debug, // @[HellaCache.scala:243:14]
input io_ptw_status_mxr, // @[HellaCache.scala:243:14]
input io_ptw_status_sum, // @[HellaCache.scala:243:14]
input io_ptw_pmp_0_cfg_l, // @[HellaCache.scala:243:14]
input [1:0] io_ptw_pmp_0_cfg_a, // @[HellaCache.scala:243:14]
input io_ptw_pmp_0_cfg_x, // @[HellaCache.scala:243:14]
input io_ptw_pmp_0_cfg_w, // @[HellaCache.scala:243:14]
input io_ptw_pmp_0_cfg_r, // @[HellaCache.scala:243:14]
input [29:0] io_ptw_pmp_0_addr, // @[HellaCache.scala:243:14]
input [31:0] io_ptw_pmp_0_mask, // @[HellaCache.scala:243:14]
input io_ptw_pmp_1_cfg_l, // @[HellaCache.scala:243:14]
input [1:0] io_ptw_pmp_1_cfg_a, // @[HellaCache.scala:243:14]
input io_ptw_pmp_1_cfg_x, // @[HellaCache.scala:243:14]
input io_ptw_pmp_1_cfg_w, // @[HellaCache.scala:243:14]
input io_ptw_pmp_1_cfg_r, // @[HellaCache.scala:243:14]
input [29:0] io_ptw_pmp_1_addr, // @[HellaCache.scala:243:14]
input [31:0] io_ptw_pmp_1_mask, // @[HellaCache.scala:243:14]
input io_ptw_pmp_2_cfg_l, // @[HellaCache.scala:243:14]
input [1:0] io_ptw_pmp_2_cfg_a, // @[HellaCache.scala:243:14]
input io_ptw_pmp_2_cfg_x, // @[HellaCache.scala:243:14]
input io_ptw_pmp_2_cfg_w, // @[HellaCache.scala:243:14]
input io_ptw_pmp_2_cfg_r, // @[HellaCache.scala:243:14]
input [29:0] io_ptw_pmp_2_addr, // @[HellaCache.scala:243:14]
input [31:0] io_ptw_pmp_2_mask, // @[HellaCache.scala:243:14]
input io_ptw_pmp_3_cfg_l, // @[HellaCache.scala:243:14]
input [1:0] io_ptw_pmp_3_cfg_a, // @[HellaCache.scala:243:14]
input io_ptw_pmp_3_cfg_x, // @[HellaCache.scala:243:14]
input io_ptw_pmp_3_cfg_w, // @[HellaCache.scala:243:14]
input io_ptw_pmp_3_cfg_r, // @[HellaCache.scala:243:14]
input [29:0] io_ptw_pmp_3_addr, // @[HellaCache.scala:243:14]
input [31:0] io_ptw_pmp_3_mask, // @[HellaCache.scala:243:14]
input io_ptw_pmp_4_cfg_l, // @[HellaCache.scala:243:14]
input [1:0] io_ptw_pmp_4_cfg_a, // @[HellaCache.scala:243:14]
input io_ptw_pmp_4_cfg_x, // @[HellaCache.scala:243:14]
input io_ptw_pmp_4_cfg_w, // @[HellaCache.scala:243:14]
input io_ptw_pmp_4_cfg_r, // @[HellaCache.scala:243:14]
input [29:0] io_ptw_pmp_4_addr, // @[HellaCache.scala:243:14]
input [31:0] io_ptw_pmp_4_mask, // @[HellaCache.scala:243:14]
input io_ptw_pmp_5_cfg_l, // @[HellaCache.scala:243:14]
input [1:0] io_ptw_pmp_5_cfg_a, // @[HellaCache.scala:243:14]
input io_ptw_pmp_5_cfg_x, // @[HellaCache.scala:243:14]
input io_ptw_pmp_5_cfg_w, // @[HellaCache.scala:243:14]
input io_ptw_pmp_5_cfg_r, // @[HellaCache.scala:243:14]
input [29:0] io_ptw_pmp_5_addr, // @[HellaCache.scala:243:14]
input [31:0] io_ptw_pmp_5_mask, // @[HellaCache.scala:243:14]
input io_ptw_pmp_6_cfg_l, // @[HellaCache.scala:243:14]
input [1:0] io_ptw_pmp_6_cfg_a, // @[HellaCache.scala:243:14]
input io_ptw_pmp_6_cfg_x, // @[HellaCache.scala:243:14]
input io_ptw_pmp_6_cfg_w, // @[HellaCache.scala:243:14]
input io_ptw_pmp_6_cfg_r, // @[HellaCache.scala:243:14]
input [29:0] io_ptw_pmp_6_addr, // @[HellaCache.scala:243:14]
input [31:0] io_ptw_pmp_6_mask, // @[HellaCache.scala:243:14]
input io_ptw_pmp_7_cfg_l, // @[HellaCache.scala:243:14]
input [1:0] io_ptw_pmp_7_cfg_a, // @[HellaCache.scala:243:14]
input io_ptw_pmp_7_cfg_x, // @[HellaCache.scala:243:14]
input io_ptw_pmp_7_cfg_w, // @[HellaCache.scala:243:14]
input io_ptw_pmp_7_cfg_r, // @[HellaCache.scala:243:14]
input [29:0] io_ptw_pmp_7_addr, // @[HellaCache.scala:243:14]
input [31:0] io_ptw_pmp_7_mask // @[HellaCache.scala:243:14]
);
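  // Internal signals of the DCache/TLB datapath are forward-declared below; the
  // `// @[...]` locators point back to the Chisel source that generated each net.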
wire [5:0] metaArb_io_in_5_bits_idx; // @[DCache.scala:1017:44]
wire io_cpu_s2_xcpt_ma_st_0; // @[DCache.scala:933:24]
wire io_cpu_s2_xcpt_ma_ld_0; // @[DCache.scala:933:24]
wire io_cpu_s2_xcpt_ae_st_0; // @[DCache.scala:933:24]
wire io_cpu_s2_xcpt_ae_ld_0; // @[DCache.scala:933:24]
wire io_cpu_s2_xcpt_pf_st_0; // @[DCache.scala:933:24]
wire io_cpu_s2_xcpt_pf_ld_0; // @[DCache.scala:933:24]
wire [21:0] metaArb_io_in_7_bits_data; // @[DCache.scala:913:97]
wire metaArb_io_in_4_valid; // @[package.scala:81:59]
wire [11:0] dataArb_io_in_2_bits_addr; // @[DCache.scala:903:72]
wire dataArb_io_in_2_valid; // @[DCache.scala:900:41]
wire [7:0] releaseWay; // @[DCache.scala:813:14, :863:102, :877:18]
wire [3:0] nodeOut_c_bits_size; // @[DCache.scala:859:48, :863:102, :864:52]
wire [2:0] nodeOut_c_bits_opcode; // @[DCache.scala:859:48, :863:102, :864:52]
wire nodeOut_c_valid; // @[DCache.scala:810:18, :824:21, :826:28, :850:47, :851:22, :854:48, :855:22]
wire [5:0] metaArb_io_in_6_bits_idx; // @[DCache.scala:772:29, :841:44, :843:33]
wire metaArb_io_in_6_valid; // @[DCache.scala:769:26, :841:44, :842:30]
wire s1_nack; // @[DCache.scala:571:36, :824:21, :839:24]
wire dataArb_io_in_1_bits_write; // @[DCache.scala:727:33, :752:68, :755:29]
wire dataArb_io_in_1_valid; // @[DCache.scala:721:26, :752:68, :755:29, :757:32]
wire nodeOut_d_ready; // @[DCache.scala:671:18, :722:51, :724:20, :752:68, :753:22]
wire [21:0] metaArb_io_in_3_bits_data; // @[DCache.scala:746:134]
wire metaArb_io_in_3_valid; // @[Edges.scala:233:22]
wire [11:0] dataArb_io_in_1_bits_addr; // @[DCache.scala:728:32]
wire [7:0] dataArb_io_in_0_bits_eccMask; // @[DCache.scala:557:47]
wire [63:0] dataArb_io_in_0_bits_wdata; // @[DCache.scala:551:63]
wire [7:0] dataArb_io_in_0_bits_way_en; // @[DCache.scala:550:38]
wire [11:0] _dataArb_io_in_0_bits_wordMask_wordMask_T; // @[DCache.scala:549:36]
wire dataArb_io_in_0_valid; // @[DCache.scala:517:44]
wire pstore_drain; // @[DCache.scala:517:44]
wire [21:0] metaArb_io_in_2_bits_data; // @[DCache.scala:467:97]
wire [5:0] metaArb_io_in_3_bits_idx; // @[DCache.scala:453:76]
wire [5:0] metaArb_io_in_4_bits_idx; // @[DCache.scala:1200:47]
wire metaArb_io_in_2_valid; // @[DCache.scala:446:62]
wire [7:0] s2_victim_or_hit_way; // @[DCache.scala:432:33]
reg [39:0] s2_req_addr; // @[DCache.scala:339:19]
wire rockettile_dcache_tag_array_s1_meta_en; // @[DCache.scala:314:59]
wire rockettile_dcache_tag_array_MPORT_en; // @[DCache.scala:310:27]
wire [5:0] metaArb_io_in_7_bits_idx; // @[DCache.scala:263:58]
wire [11:0] dataArb_io_in_3_bits_addr; // @[DCache.scala:245:30]
wire dataArb_io_in_3_valid; // @[DCache.scala:242:46]
reg [7:0] refill_way; // @[DCache.scala:229:23]
reg resetting; // @[DCache.scala:224:26]
reg [1:0] s1_tlb_req_prv; // @[DCache.scala:208:29]
reg s1_tlb_req_passthrough; // @[DCache.scala:208:29]
reg [39:0] s1_tlb_req_vaddr; // @[DCache.scala:208:29]
wire [63:0] _amoalus_0_io_out; // @[DCache.scala:982:26]
wire [63:0] _data_io_resp_0; // @[DCache.scala:145:20]
wire [63:0] _data_io_resp_1; // @[DCache.scala:145:20]
wire [63:0] _data_io_resp_2; // @[DCache.scala:145:20]
wire [63:0] _data_io_resp_3; // @[DCache.scala:145:20]
wire [63:0] _data_io_resp_4; // @[DCache.scala:145:20]
wire [63:0] _data_io_resp_5; // @[DCache.scala:145:20]
wire [63:0] _data_io_resp_6; // @[DCache.scala:145:20]
wire [63:0] _data_io_resp_7; // @[DCache.scala:145:20]
wire [175:0] _rockettile_dcache_tag_array_RW0_rdata; // @[DescribedSRAM.scala:17:26]
wire _lfsr_prng_io_out_0; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_1; // @[PRNG.scala:91:22]
wire _lfsr_prng_io_out_2; // @[PRNG.scala:91:22]
wire [19:0] _tlb_entries_barrier_12_io_y_ppn; // @[package.scala:267:25]
wire _tlb_entries_barrier_12_io_y_u; // @[package.scala:267:25]
wire _tlb_entries_barrier_12_io_y_ae_ptw; // @[package.scala:267:25]
wire _tlb_entries_barrier_12_io_y_ae_final; // @[package.scala:267:25]
wire _tlb_entries_barrier_12_io_y_pf; // @[package.scala:267:25]
wire _tlb_entries_barrier_12_io_y_gf; // @[package.scala:267:25]
wire _tlb_entries_barrier_12_io_y_sw; // @[package.scala:267:25]
wire _tlb_entries_barrier_12_io_y_sx; // @[package.scala:267:25]
wire _tlb_entries_barrier_12_io_y_sr; // @[package.scala:267:25]
wire [19:0] _tlb_entries_barrier_11_io_y_ppn; // @[package.scala:267:25]
wire _tlb_entries_barrier_11_io_y_u; // @[package.scala:267:25]
wire _tlb_entries_barrier_11_io_y_ae_ptw; // @[package.scala:267:25]
wire _tlb_entries_barrier_11_io_y_ae_final; // @[package.scala:267:25]
wire _tlb_entries_barrier_11_io_y_pf; // @[package.scala:267:25]
wire _tlb_entries_barrier_11_io_y_gf; // @[package.scala:267:25]
wire _tlb_entries_barrier_11_io_y_sw; // @[package.scala:267:25]
wire _tlb_entries_barrier_11_io_y_sx; // @[package.scala:267:25]
wire _tlb_entries_barrier_11_io_y_sr; // @[package.scala:267:25]
wire _tlb_entries_barrier_11_io_y_pw; // @[package.scala:267:25]
wire _tlb_entries_barrier_11_io_y_pr; // @[package.scala:267:25]
wire _tlb_entries_barrier_11_io_y_ppp; // @[package.scala:267:25]
wire _tlb_entries_barrier_11_io_y_pal; // @[package.scala:267:25]
wire _tlb_entries_barrier_11_io_y_paa; // @[package.scala:267:25]
wire _tlb_entries_barrier_11_io_y_eff; // @[package.scala:267:25]
wire _tlb_entries_barrier_11_io_y_c; // @[package.scala:267:25]
wire [19:0] _tlb_entries_barrier_10_io_y_ppn; // @[package.scala:267:25]
wire _tlb_entries_barrier_10_io_y_u; // @[package.scala:267:25]
wire _tlb_entries_barrier_10_io_y_ae_ptw; // @[package.scala:267:25]
wire _tlb_entries_barrier_10_io_y_ae_final; // @[package.scala:267:25]
wire _tlb_entries_barrier_10_io_y_pf; // @[package.scala:267:25]
wire _tlb_entries_barrier_10_io_y_gf; // @[package.scala:267:25]
wire _tlb_entries_barrier_10_io_y_sw; // @[package.scala:267:25]
wire _tlb_entries_barrier_10_io_y_sx; // @[package.scala:267:25]
wire _tlb_entries_barrier_10_io_y_sr; // @[package.scala:267:25]
wire _tlb_entries_barrier_10_io_y_pw; // @[package.scala:267:25]
wire _tlb_entries_barrier_10_io_y_pr; // @[package.scala:267:25]
wire _tlb_entries_barrier_10_io_y_ppp; // @[package.scala:267:25]
wire _tlb_entries_barrier_10_io_y_pal; // @[package.scala:267:25]
wire _tlb_entries_barrier_10_io_y_paa; // @[package.scala:267:25]
wire _tlb_entries_barrier_10_io_y_eff; // @[package.scala:267:25]
wire _tlb_entries_barrier_10_io_y_c; // @[package.scala:267:25]
wire [19:0] _tlb_entries_barrier_9_io_y_ppn; // @[package.scala:267:25]
wire _tlb_entries_barrier_9_io_y_u; // @[package.scala:267:25]
wire _tlb_entries_barrier_9_io_y_ae_ptw; // @[package.scala:267:25]
wire _tlb_entries_barrier_9_io_y_ae_final; // @[package.scala:267:25]
wire _tlb_entries_barrier_9_io_y_pf; // @[package.scala:267:25]
wire _tlb_entries_barrier_9_io_y_gf; // @[package.scala:267:25]
wire _tlb_entries_barrier_9_io_y_sw; // @[package.scala:267:25]
wire _tlb_entries_barrier_9_io_y_sx; // @[package.scala:267:25]
wire _tlb_entries_barrier_9_io_y_sr; // @[package.scala:267:25]
wire _tlb_entries_barrier_9_io_y_pw; // @[package.scala:267:25]
wire _tlb_entries_barrier_9_io_y_pr; // @[package.scala:267:25]
wire _tlb_entries_barrier_9_io_y_ppp; // @[package.scala:267:25]
wire _tlb_entries_barrier_9_io_y_pal; // @[package.scala:267:25]
wire _tlb_entries_barrier_9_io_y_paa; // @[package.scala:267:25]
wire _tlb_entries_barrier_9_io_y_eff; // @[package.scala:267:25]
wire _tlb_entries_barrier_9_io_y_c; // @[package.scala:267:25]
wire [19:0] _tlb_entries_barrier_8_io_y_ppn; // @[package.scala:267:25]
wire _tlb_entries_barrier_8_io_y_u; // @[package.scala:267:25]
wire _tlb_entries_barrier_8_io_y_ae_ptw; // @[package.scala:267:25]
wire _tlb_entries_barrier_8_io_y_ae_final; // @[package.scala:267:25]
wire _tlb_entries_barrier_8_io_y_pf; // @[package.scala:267:25]
wire _tlb_entries_barrier_8_io_y_gf; // @[package.scala:267:25]
wire _tlb_entries_barrier_8_io_y_sw; // @[package.scala:267:25]
wire _tlb_entries_barrier_8_io_y_sx; // @[package.scala:267:25]
wire _tlb_entries_barrier_8_io_y_sr; // @[package.scala:267:25]
wire _tlb_entries_barrier_8_io_y_pw; // @[package.scala:267:25]
wire _tlb_entries_barrier_8_io_y_pr; // @[package.scala:267:25]
wire _tlb_entries_barrier_8_io_y_ppp; // @[package.scala:267:25]
wire _tlb_entries_barrier_8_io_y_pal; // @[package.scala:267:25]
wire _tlb_entries_barrier_8_io_y_paa; // @[package.scala:267:25]
wire _tlb_entries_barrier_8_io_y_eff; // @[package.scala:267:25]
wire _tlb_entries_barrier_8_io_y_c; // @[package.scala:267:25]
wire [19:0] _tlb_entries_barrier_7_io_y_ppn; // @[package.scala:267:25]
wire _tlb_entries_barrier_7_io_y_u; // @[package.scala:267:25]
wire _tlb_entries_barrier_7_io_y_ae_ptw; // @[package.scala:267:25]
wire _tlb_entries_barrier_7_io_y_ae_final; // @[package.scala:267:25]
wire _tlb_entries_barrier_7_io_y_pf; // @[package.scala:267:25]
wire _tlb_entries_barrier_7_io_y_gf; // @[package.scala:267:25]
wire _tlb_entries_barrier_7_io_y_sw; // @[package.scala:267:25]
wire _tlb_entries_barrier_7_io_y_sx; // @[package.scala:267:25]
wire _tlb_entries_barrier_7_io_y_sr; // @[package.scala:267:25]
wire _tlb_entries_barrier_7_io_y_pw; // @[package.scala:267:25]
wire _tlb_entries_barrier_7_io_y_pr; // @[package.scala:267:25]
wire _tlb_entries_barrier_7_io_y_ppp; // @[package.scala:267:25]
wire _tlb_entries_barrier_7_io_y_pal; // @[package.scala:267:25]
wire _tlb_entries_barrier_7_io_y_paa; // @[package.scala:267:25]
wire _tlb_entries_barrier_7_io_y_eff; // @[package.scala:267:25]
wire _tlb_entries_barrier_7_io_y_c; // @[package.scala:267:25]
wire [19:0] _tlb_entries_barrier_6_io_y_ppn; // @[package.scala:267:25]
wire _tlb_entries_barrier_6_io_y_u; // @[package.scala:267:25]
wire _tlb_entries_barrier_6_io_y_ae_ptw; // @[package.scala:267:25]
wire _tlb_entries_barrier_6_io_y_ae_final; // @[package.scala:267:25]
wire _tlb_entries_barrier_6_io_y_pf; // @[package.scala:267:25]
wire _tlb_entries_barrier_6_io_y_gf; // @[package.scala:267:25]
wire _tlb_entries_barrier_6_io_y_sw; // @[package.scala:267:25]
wire _tlb_entries_barrier_6_io_y_sx; // @[package.scala:267:25]
wire _tlb_entries_barrier_6_io_y_sr; // @[package.scala:267:25]
wire _tlb_entries_barrier_6_io_y_pw; // @[package.scala:267:25]
wire _tlb_entries_barrier_6_io_y_pr; // @[package.scala:267:25]
wire _tlb_entries_barrier_6_io_y_ppp; // @[package.scala:267:25]
wire _tlb_entries_barrier_6_io_y_pal; // @[package.scala:267:25]
wire _tlb_entries_barrier_6_io_y_paa; // @[package.scala:267:25]
wire _tlb_entries_barrier_6_io_y_eff; // @[package.scala:267:25]
wire _tlb_entries_barrier_6_io_y_c; // @[package.scala:267:25]
wire [19:0] _tlb_entries_barrier_5_io_y_ppn; // @[package.scala:267:25]
wire _tlb_entries_barrier_5_io_y_u; // @[package.scala:267:25]
wire _tlb_entries_barrier_5_io_y_ae_ptw; // @[package.scala:267:25]
wire _tlb_entries_barrier_5_io_y_ae_final; // @[package.scala:267:25]
wire _tlb_entries_barrier_5_io_y_pf; // @[package.scala:267:25]
wire _tlb_entries_barrier_5_io_y_gf; // @[package.scala:267:25]
wire _tlb_entries_barrier_5_io_y_sw; // @[package.scala:267:25]
wire _tlb_entries_barrier_5_io_y_sx; // @[package.scala:267:25]
wire _tlb_entries_barrier_5_io_y_sr; // @[package.scala:267:25]
wire _tlb_entries_barrier_5_io_y_pw; // @[package.scala:267:25]
wire _tlb_entries_barrier_5_io_y_pr; // @[package.scala:267:25]
wire _tlb_entries_barrier_5_io_y_ppp; // @[package.scala:267:25]
wire _tlb_entries_barrier_5_io_y_pal; // @[package.scala:267:25]
wire _tlb_entries_barrier_5_io_y_paa; // @[package.scala:267:25]
wire _tlb_entries_barrier_5_io_y_eff; // @[package.scala:267:25]
wire _tlb_entries_barrier_5_io_y_c; // @[package.scala:267:25]
wire [19:0] _tlb_entries_barrier_4_io_y_ppn; // @[package.scala:267:25]
wire _tlb_entries_barrier_4_io_y_u; // @[package.scala:267:25]
wire _tlb_entries_barrier_4_io_y_ae_ptw; // @[package.scala:267:25]
wire _tlb_entries_barrier_4_io_y_ae_final; // @[package.scala:267:25]
wire _tlb_entries_barrier_4_io_y_pf; // @[package.scala:267:25]
wire _tlb_entries_barrier_4_io_y_gf; // @[package.scala:267:25]
wire _tlb_entries_barrier_4_io_y_sw; // @[package.scala:267:25]
wire _tlb_entries_barrier_4_io_y_sx; // @[package.scala:267:25]
wire _tlb_entries_barrier_4_io_y_sr; // @[package.scala:267:25]
wire _tlb_entries_barrier_4_io_y_pw; // @[package.scala:267:25]
wire _tlb_entries_barrier_4_io_y_pr; // @[package.scala:267:25]
wire _tlb_entries_barrier_4_io_y_ppp; // @[package.scala:267:25]
wire _tlb_entries_barrier_4_io_y_pal; // @[package.scala:267:25]
wire _tlb_entries_barrier_4_io_y_paa; // @[package.scala:267:25]
wire _tlb_entries_barrier_4_io_y_eff; // @[package.scala:267:25]
wire _tlb_entries_barrier_4_io_y_c; // @[package.scala:267:25]
wire [19:0] _tlb_entries_barrier_3_io_y_ppn; // @[package.scala:267:25]
wire _tlb_entries_barrier_3_io_y_u; // @[package.scala:267:25]
wire _tlb_entries_barrier_3_io_y_ae_ptw; // @[package.scala:267:25]
wire _tlb_entries_barrier_3_io_y_ae_final; // @[package.scala:267:25]
wire _tlb_entries_barrier_3_io_y_pf; // @[package.scala:267:25]
wire _tlb_entries_barrier_3_io_y_gf; // @[package.scala:267:25]
wire _tlb_entries_barrier_3_io_y_sw; // @[package.scala:267:25]
wire _tlb_entries_barrier_3_io_y_sx; // @[package.scala:267:25]
wire _tlb_entries_barrier_3_io_y_sr; // @[package.scala:267:25]
wire _tlb_entries_barrier_3_io_y_pw; // @[package.scala:267:25]
wire _tlb_entries_barrier_3_io_y_pr; // @[package.scala:267:25]
wire _tlb_entries_barrier_3_io_y_ppp; // @[package.scala:267:25]
wire _tlb_entries_barrier_3_io_y_pal; // @[package.scala:267:25]
wire _tlb_entries_barrier_3_io_y_paa; // @[package.scala:267:25]
wire _tlb_entries_barrier_3_io_y_eff; // @[package.scala:267:25]
wire _tlb_entries_barrier_3_io_y_c; // @[package.scala:267:25]
wire [19:0] _tlb_entries_barrier_2_io_y_ppn; // @[package.scala:267:25]
wire _tlb_entries_barrier_2_io_y_u; // @[package.scala:267:25]
wire _tlb_entries_barrier_2_io_y_ae_ptw; // @[package.scala:267:25]
wire _tlb_entries_barrier_2_io_y_ae_final; // @[package.scala:267:25]
wire _tlb_entries_barrier_2_io_y_pf; // @[package.scala:267:25]
wire _tlb_entries_barrier_2_io_y_gf; // @[package.scala:267:25]
wire _tlb_entries_barrier_2_io_y_sw; // @[package.scala:267:25]
wire _tlb_entries_barrier_2_io_y_sx; // @[package.scala:267:25]
wire _tlb_entries_barrier_2_io_y_sr; // @[package.scala:267:25]
wire _tlb_entries_barrier_2_io_y_pw; // @[package.scala:267:25]
wire _tlb_entries_barrier_2_io_y_pr; // @[package.scala:267:25]
wire _tlb_entries_barrier_2_io_y_ppp; // @[package.scala:267:25]
wire _tlb_entries_barrier_2_io_y_pal; // @[package.scala:267:25]
wire _tlb_entries_barrier_2_io_y_paa; // @[package.scala:267:25]
wire _tlb_entries_barrier_2_io_y_eff; // @[package.scala:267:25]
wire _tlb_entries_barrier_2_io_y_c; // @[package.scala:267:25]
wire [19:0] _tlb_entries_barrier_1_io_y_ppn; // @[package.scala:267:25]
wire _tlb_entries_barrier_1_io_y_u; // @[package.scala:267:25]
wire _tlb_entries_barrier_1_io_y_ae_ptw; // @[package.scala:267:25]
wire _tlb_entries_barrier_1_io_y_ae_final; // @[package.scala:267:25]
wire _tlb_entries_barrier_1_io_y_pf; // @[package.scala:267:25]
wire _tlb_entries_barrier_1_io_y_gf; // @[package.scala:267:25]
wire _tlb_entries_barrier_1_io_y_sw; // @[package.scala:267:25]
wire _tlb_entries_barrier_1_io_y_sx; // @[package.scala:267:25]
wire _tlb_entries_barrier_1_io_y_sr; // @[package.scala:267:25]
wire _tlb_entries_barrier_1_io_y_pw; // @[package.scala:267:25]
wire _tlb_entries_barrier_1_io_y_pr; // @[package.scala:267:25]
wire _tlb_entries_barrier_1_io_y_ppp; // @[package.scala:267:25]
wire _tlb_entries_barrier_1_io_y_pal; // @[package.scala:267:25]
wire _tlb_entries_barrier_1_io_y_paa; // @[package.scala:267:25]
wire _tlb_entries_barrier_1_io_y_eff; // @[package.scala:267:25]
wire _tlb_entries_barrier_1_io_y_c; // @[package.scala:267:25]
wire [19:0] _tlb_entries_barrier_io_y_ppn; // @[package.scala:267:25]
wire _tlb_entries_barrier_io_y_u; // @[package.scala:267:25]
wire _tlb_entries_barrier_io_y_ae_ptw; // @[package.scala:267:25]
wire _tlb_entries_barrier_io_y_ae_final; // @[package.scala:267:25]
wire _tlb_entries_barrier_io_y_pf; // @[package.scala:267:25]
wire _tlb_entries_barrier_io_y_gf; // @[package.scala:267:25]
wire _tlb_entries_barrier_io_y_sw; // @[package.scala:267:25]
wire _tlb_entries_barrier_io_y_sx; // @[package.scala:267:25]
wire _tlb_entries_barrier_io_y_sr; // @[package.scala:267:25]
wire _tlb_entries_barrier_io_y_pw; // @[package.scala:267:25]
wire _tlb_entries_barrier_io_y_pr; // @[package.scala:267:25]
wire _tlb_entries_barrier_io_y_ppp; // @[package.scala:267:25]
wire _tlb_entries_barrier_io_y_pal; // @[package.scala:267:25]
wire _tlb_entries_barrier_io_y_paa; // @[package.scala:267:25]
wire _tlb_entries_barrier_io_y_eff; // @[package.scala:267:25]
wire _tlb_entries_barrier_io_y_c; // @[package.scala:267:25]
wire _tlb_pma_io_resp_cacheable; // @[TLB.scala:422:19]
wire _tlb_pma_io_resp_r; // @[TLB.scala:422:19]
wire _tlb_pma_io_resp_w; // @[TLB.scala:422:19]
wire _tlb_pma_io_resp_pp; // @[TLB.scala:422:19]
wire _tlb_pma_io_resp_al; // @[TLB.scala:422:19]
wire _tlb_pma_io_resp_aa; // @[TLB.scala:422:19]
wire _tlb_pma_io_resp_x; // @[TLB.scala:422:19]
wire _tlb_pma_io_resp_eff; // @[TLB.scala:422:19]
wire _tlb_pmp_io_r; // @[TLB.scala:416:19]
wire _tlb_pmp_io_w; // @[TLB.scala:416:19]
wire _tlb_pmp_io_x; // @[TLB.scala:416:19]
wire [19:0] _tlb_mpu_ppn_barrier_io_y_ppn; // @[package.scala:267:25]
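  // Data-TLB storage (TLB.scala): eight sectored ways (four 4 KiB sectors each),
  // four superpage entries, and one special entry that may hold a translation at
  // any page level, plus refill/replacement/hit-tracking state registers.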
reg [26:0] tlb_sectored_entries_0_0_tag_vpn; // @[TLB.scala:339:29]
reg tlb_; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_0_data_0; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_0_data_1; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_0_data_2; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_0_data_3; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_0_valid_0; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_0_valid_1; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_0_valid_2; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_0_valid_3; // @[TLB.scala:339:29]
reg [26:0] tlb_sectored_entries_0_1_tag_vpn; // @[TLB.scala:339:29]
reg tlb__0; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_1_data_0; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_1_data_1; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_1_data_2; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_1_data_3; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_1_valid_0; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_1_valid_1; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_1_valid_2; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_1_valid_3; // @[TLB.scala:339:29]
reg [26:0] tlb_sectored_entries_0_2_tag_vpn; // @[TLB.scala:339:29]
reg tlb__1; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_2_data_0; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_2_data_1; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_2_data_2; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_2_data_3; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_2_valid_0; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_2_valid_1; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_2_valid_2; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_2_valid_3; // @[TLB.scala:339:29]
reg [26:0] tlb_sectored_entries_0_3_tag_vpn; // @[TLB.scala:339:29]
reg tlb__2; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_3_data_0; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_3_data_1; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_3_data_2; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_3_data_3; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_3_valid_0; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_3_valid_1; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_3_valid_2; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_3_valid_3; // @[TLB.scala:339:29]
reg [26:0] tlb_sectored_entries_0_4_tag_vpn; // @[TLB.scala:339:29]
reg tlb__3; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_4_data_0; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_4_data_1; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_4_data_2; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_4_data_3; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_4_valid_0; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_4_valid_1; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_4_valid_2; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_4_valid_3; // @[TLB.scala:339:29]
reg [26:0] tlb_sectored_entries_0_5_tag_vpn; // @[TLB.scala:339:29]
reg tlb__4; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_5_data_0; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_5_data_1; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_5_data_2; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_5_data_3; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_5_valid_0; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_5_valid_1; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_5_valid_2; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_5_valid_3; // @[TLB.scala:339:29]
reg [26:0] tlb_sectored_entries_0_6_tag_vpn; // @[TLB.scala:339:29]
reg tlb__5; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_6_data_0; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_6_data_1; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_6_data_2; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_6_data_3; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_6_valid_0; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_6_valid_1; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_6_valid_2; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_6_valid_3; // @[TLB.scala:339:29]
reg [26:0] tlb_sectored_entries_0_7_tag_vpn; // @[TLB.scala:339:29]
reg tlb__6; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_7_data_0; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_7_data_1; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_7_data_2; // @[TLB.scala:339:29]
reg [41:0] tlb_sectored_entries_0_7_data_3; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_7_valid_0; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_7_valid_1; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_7_valid_2; // @[TLB.scala:339:29]
reg tlb_sectored_entries_0_7_valid_3; // @[TLB.scala:339:29]
reg [1:0] tlb_superpage_entries_0_level; // @[TLB.scala:341:30]
reg [26:0] tlb_superpage_entries_0_tag_vpn; // @[TLB.scala:341:30]
reg tlb__7; // @[TLB.scala:341:30]
reg [41:0] tlb_superpage_entries_0_data_0; // @[TLB.scala:341:30]
reg tlb_superpage_entries_0_valid_0; // @[TLB.scala:341:30]
reg [1:0] tlb_superpage_entries_1_level; // @[TLB.scala:341:30]
reg [26:0] tlb_superpage_entries_1_tag_vpn; // @[TLB.scala:341:30]
reg tlb__8; // @[TLB.scala:341:30]
reg [41:0] tlb_superpage_entries_1_data_0; // @[TLB.scala:341:30]
reg tlb_superpage_entries_1_valid_0; // @[TLB.scala:341:30]
reg [1:0] tlb_superpage_entries_2_level; // @[TLB.scala:341:30]
reg [26:0] tlb_superpage_entries_2_tag_vpn; // @[TLB.scala:341:30]
reg tlb__9; // @[TLB.scala:341:30]
reg [41:0] tlb_superpage_entries_2_data_0; // @[TLB.scala:341:30]
reg tlb_superpage_entries_2_valid_0; // @[TLB.scala:341:30]
reg [1:0] tlb_superpage_entries_3_level; // @[TLB.scala:341:30]
reg [26:0] tlb_superpage_entries_3_tag_vpn; // @[TLB.scala:341:30]
reg tlb__10; // @[TLB.scala:341:30]
reg [41:0] tlb_superpage_entries_3_data_0; // @[TLB.scala:341:30]
reg tlb_superpage_entries_3_valid_0; // @[TLB.scala:341:30]
reg [1:0] tlb_special_entry_level; // @[TLB.scala:346:56]
reg [26:0] tlb_special_entry_tag_vpn; // @[TLB.scala:346:56]
reg [41:0] tlb_special_entry_data_0; // @[TLB.scala:346:56]
reg tlb_special_entry_valid_0; // @[TLB.scala:346:56]
reg [1:0] tlb_state; // @[TLB.scala:352:22]
reg [26:0] tlb_r_refill_tag; // @[TLB.scala:354:25]
reg [1:0] tlb_waddr; // @[TLB.scala:355:34]
reg [2:0] tlb_r_sectored_repl_addr; // @[TLB.scala:356:33]
reg tlb_r_sectored_hit_valid; // @[TLB.scala:357:27]
reg [2:0] tlb_r_sectored_hit_bits; // @[TLB.scala:357:27]
reg tlb_r_need_gpa; // @[TLB.scala:361:23]
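  // TLB lookup: translation is active only when the PTBR mode bit is set, the
  // effective privilege bit prv[1] is clear (S/U mode), and the request is not a
  // passthrough.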
wire tlb_vm_enabled = io_ptw_ptbr_mode[3] & ~(s1_tlb_req_prv[1]) & ~s1_tlb_req_passthrough; // @[TLB.scala:372:27, :374:41, :399:{45,61,64}]
wire tlb__11 = tlb_state == 2'h1; // @[package.scala:16:47]
wire tlb_ignore_13 = tlb_special_entry_level == 2'h0; // @[TLB.scala:197:28, :346:56]
wire [27:0] tlb_mpu_ppn = io_ptw_resp_valid ? {8'h0, io_ptw_resp_bits_pte_ppn[19:0]} : tlb_vm_enabled ? {8'h0, _tlb_mpu_ppn_barrier_io_y_ppn[19:18], (tlb_ignore_13 ? s1_tlb_req_vaddr[29:21] : 9'h0) | _tlb_mpu_ppn_barrier_io_y_ppn[17:9], (tlb_special_entry_level[1] ? 9'h0 : s1_tlb_req_vaddr[20:12]) | _tlb_mpu_ppn_barrier_io_y_ppn[8:0]} : s1_tlb_req_vaddr[39:12]; // @[package.scala:267:25]
wire [2:0] tlb_mpu_priv = io_ptw_resp_valid | s1_tlb_req_passthrough ? 3'h1 : {io_ptw_status_debug, s1_tlb_req_prv}; // @[TLB.scala:415:{27,52,103}]
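  // Per-way tag check: XOR the stored VPN against the request VPN; an all-zero
  // result indicates a tag match for that sectored or superpage entry.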
wire [24:0] tlb__12 = tlb_sectored_entries_0_0_tag_vpn[26:2] ^ s1_tlb_req_vaddr[38:14]; // @[TLB.scala:174:61, :335:30, :339:29]
wire [24:0] tlb__13 = tlb_sectored_entries_0_1_tag_vpn[26:2] ^ s1_tlb_req_vaddr[38:14]; // @[TLB.scala:174:61, :335:30, :339:29]
wire [24:0] tlb__14 = tlb_sectored_entries_0_2_tag_vpn[26:2] ^ s1_tlb_req_vaddr[38:14]; // @[TLB.scala:174:61, :335:30, :339:29]
wire [24:0] tlb__15 = tlb_sectored_entries_0_3_tag_vpn[26:2] ^ s1_tlb_req_vaddr[38:14]; // @[TLB.scala:174:61, :335:30, :339:29]
wire [24:0] tlb__16 = tlb_sectored_entries_0_4_tag_vpn[26:2] ^ s1_tlb_req_vaddr[38:14]; // @[TLB.scala:174:61, :335:30, :339:29]
wire [24:0] tlb__17 = tlb_sectored_entries_0_5_tag_vpn[26:2] ^ s1_tlb_req_vaddr[38:14]; // @[TLB.scala:174:61, :335:30, :339:29]
wire [24:0] tlb__18 = tlb_sectored_entries_0_6_tag_vpn[26:2] ^ s1_tlb_req_vaddr[38:14]; // @[TLB.scala:174:61, :335:30, :339:29]
wire [24:0] tlb__19 = tlb_sectored_entries_0_7_tag_vpn[26:2] ^ s1_tlb_req_vaddr[38:14]; // @[TLB.scala:174:61, :335:30, :339:29]
wire [17:0] tlb__20 = tlb_superpage_entries_0_tag_vpn[26:9] ^ s1_tlb_req_vaddr[38:21]; // @[TLB.scala:183:52, :335:30, :341:30]
wire tlb_ignore_1 = tlb_superpage_entries_0_level == 2'h0; // @[TLB.scala:182:28, :341:30]
wire [17:0] tlb__21 = tlb_superpage_entries_1_tag_vpn[26:9] ^ s1_tlb_req_vaddr[38:21]; // @[TLB.scala:183:52, :335:30, :341:30]
wire tlb_ignore_4 = tlb_superpage_entries_1_level == 2'h0; // @[TLB.scala:182:28, :341:30]
wire [17:0] tlb__22 = tlb_superpage_entries_2_tag_vpn[26:9] ^ s1_tlb_req_vaddr[38:21]; // @[TLB.scala:183:52, :335:30, :341:30]
wire tlb_ignore_7 = tlb_superpage_entries_2_level == 2'h0; // @[TLB.scala:182:28, :341:30]
wire [17:0] tlb__23 = tlb_superpage_entries_3_tag_vpn[26:9] ^ s1_tlb_req_vaddr[38:21]; // @[TLB.scala:183:52, :335:30, :341:30]
wire tlb_ignore_10 = tlb_superpage_entries_3_level == 2'h0; // @[TLB.scala:182:28, :341:30]
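  // Hit vectors: a sectored way hits when translation is enabled, the sector
  // selected by vaddr[13:12] is valid, and the tag XOR above is zero; superpage
  // and special entries additionally ignore VPN bits below their page level.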
wire [3:0] _GEN = {{tlb_sectored_entries_0_0_valid_3}, {tlb_sectored_entries_0_0_valid_2}, {tlb_sectored_entries_0_0_valid_1}, {tlb_sectored_entries_0_0_valid_0}}; // @[TLB.scala:188:18, :339:29]
wire tlb_hitsVec_0 = tlb_vm_enabled & _GEN[s1_tlb_req_vaddr[13:12]] & tlb__12 == 25'h0 & ~tlb_; // @[package.scala:163:13]
wire [3:0] _GEN_0 = {{tlb_sectored_entries_0_1_valid_3}, {tlb_sectored_entries_0_1_valid_2}, {tlb_sectored_entries_0_1_valid_1}, {tlb_sectored_entries_0_1_valid_0}}; // @[TLB.scala:188:18, :339:29]
wire tlb_hitsVec_1 = tlb_vm_enabled & _GEN_0[s1_tlb_req_vaddr[13:12]] & tlb__13 == 25'h0 & ~tlb__0; // @[package.scala:163:13]
wire [3:0] _GEN_1 = {{tlb_sectored_entries_0_2_valid_3}, {tlb_sectored_entries_0_2_valid_2}, {tlb_sectored_entries_0_2_valid_1}, {tlb_sectored_entries_0_2_valid_0}}; // @[TLB.scala:188:18, :339:29]
wire tlb_hitsVec_2 = tlb_vm_enabled & _GEN_1[s1_tlb_req_vaddr[13:12]] & tlb__14 == 25'h0 & ~tlb__1; // @[package.scala:163:13]
wire [3:0] _GEN_2 = {{tlb_sectored_entries_0_3_valid_3}, {tlb_sectored_entries_0_3_valid_2}, {tlb_sectored_entries_0_3_valid_1}, {tlb_sectored_entries_0_3_valid_0}}; // @[TLB.scala:188:18, :339:29]
wire tlb_hitsVec_3 = tlb_vm_enabled & _GEN_2[s1_tlb_req_vaddr[13:12]] & tlb__15 == 25'h0 & ~tlb__2; // @[package.scala:163:13]
wire [3:0] _GEN_3 = {{tlb_sectored_entries_0_4_valid_3}, {tlb_sectored_entries_0_4_valid_2}, {tlb_sectored_entries_0_4_valid_1}, {tlb_sectored_entries_0_4_valid_0}}; // @[TLB.scala:188:18, :339:29]
wire tlb_hitsVec_4 = tlb_vm_enabled & _GEN_3[s1_tlb_req_vaddr[13:12]] & tlb__16 == 25'h0 & ~tlb__3; // @[package.scala:163:13]
wire [3:0] _GEN_4 = {{tlb_sectored_entries_0_5_valid_3}, {tlb_sectored_entries_0_5_valid_2}, {tlb_sectored_entries_0_5_valid_1}, {tlb_sectored_entries_0_5_valid_0}}; // @[TLB.scala:188:18, :339:29]
wire tlb_hitsVec_5 = tlb_vm_enabled & _GEN_4[s1_tlb_req_vaddr[13:12]] & tlb__17 == 25'h0 & ~tlb__4; // @[package.scala:163:13]
wire [3:0] _GEN_5 = {{tlb_sectored_entries_0_6_valid_3}, {tlb_sectored_entries_0_6_valid_2}, {tlb_sectored_entries_0_6_valid_1}, {tlb_sectored_entries_0_6_valid_0}}; // @[TLB.scala:188:18, :339:29]
wire tlb_hitsVec_6 = tlb_vm_enabled & _GEN_5[s1_tlb_req_vaddr[13:12]] & tlb__18 == 25'h0 & ~tlb__5; // @[package.scala:163:13]
wire [3:0] _GEN_6 = {{tlb_sectored_entries_0_7_valid_3}, {tlb_sectored_entries_0_7_valid_2}, {tlb_sectored_entries_0_7_valid_1}, {tlb_sectored_entries_0_7_valid_0}}; // @[TLB.scala:188:18, :339:29]
wire tlb_hitsVec_7 = tlb_vm_enabled & _GEN_6[s1_tlb_req_vaddr[13:12]] & tlb__19 == 25'h0 & ~tlb__6; // @[package.scala:163:13]
wire tlb_hitsVec_8 = tlb_vm_enabled & tlb_superpage_entries_0_valid_0 & ~tlb__7 & tlb__20[17:9] == 9'h0 & (tlb_ignore_1 | tlb__20[8:0] == 9'h0); // @[TLB.scala:178:{33,43}, :182:28, :183:{29,40,52,58,79}, :341:30, :399:{45,61}, :440:44]
wire tlb_hitsVec_9 = tlb_vm_enabled & tlb_superpage_entries_1_valid_0 & ~tlb__8 & tlb__21[17:9] == 9'h0 & (tlb_ignore_4 | tlb__21[8:0] == 9'h0); // @[TLB.scala:178:{33,43}, :182:28, :183:{29,40,52,58,79}, :341:30, :399:{45,61}, :440:44]
wire tlb_hitsVec_10 = tlb_vm_enabled & tlb_superpage_entries_2_valid_0 & ~tlb__9 & tlb__22[17:9] == 9'h0 & (tlb_ignore_7 | tlb__22[8:0] == 9'h0); // @[TLB.scala:178:{33,43}, :182:28, :183:{29,40,52,58,79}, :341:30, :399:{45,61}, :440:44]
wire tlb_hitsVec_11 = tlb_vm_enabled & tlb_superpage_entries_3_valid_0 & ~tlb__10 & tlb__23[17:9] == 9'h0 & (tlb_ignore_10 | tlb__23[8:0] == 9'h0); // @[TLB.scala:178:{33,43}, :182:28, :183:{29,40,52,58,79}, :341:30, :399:{45,61}, :440:44]
wire [26:0] tlb__24 = tlb_special_entry_tag_vpn ^ s1_tlb_req_vaddr[38:12]; // @[TLB.scala:183:52, :335:30, :346:56]
wire tlb_hitsVec_12 = tlb_vm_enabled & tlb_special_entry_valid_0 & tlb__24[26:18] == 9'h0 & (tlb_ignore_13 | tlb__24[17:9] == 9'h0) & (~(tlb_special_entry_level[1]) | tlb__24[8:0] == 9'h0); // @[TLB.scala:183:{29,40,52,58,79}, :197:28, :346:56, :399:{45,61}, :440:44]
wire [12:0] tlb_real_hits = {tlb_hitsVec_12, tlb_hitsVec_11, tlb_hitsVec_10, tlb_hitsVec_9, tlb_hitsVec_8, tlb_hitsVec_7, tlb_hitsVec_6, tlb_hitsVec_5, tlb_hitsVec_4, tlb_hitsVec_3, tlb_hitsVec_2, tlb_hitsVec_1, tlb_hitsVec_0}; // @[package.scala:45:27]
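  // Entry data read-out: select the 42-bit entry data for the sector addressed by
  // vaddr[13:12] in each way; these words presumably feed the tlb_entries_barrier
  // instances whose unpacked io_y_* permission fields appear above.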
wire [3:0][41:0] _GEN_7 = {{tlb_sectored_entries_0_0_data_3}, {tlb_sectored_entries_0_0_data_2}, {tlb_sectored_entries_0_0_data_1}, {tlb_sectored_entries_0_0_data_0}}; // @[TLB.scala:170:77, :339:29]
wire [41:0] tlb__entries_WIRE_1 = _GEN_7[s1_tlb_req_vaddr[13:12]]; // @[package.scala:163:13]
wire [3:0][41:0] _GEN_8 = {{tlb_sectored_entries_0_1_data_3}, {tlb_sectored_entries_0_1_data_2}, {tlb_sectored_entries_0_1_data_1}, {tlb_sectored_entries_0_1_data_0}}; // @[TLB.scala:170:77, :339:29]
wire [41:0] tlb__entries_WIRE_3 = _GEN_8[s1_tlb_req_vaddr[13:12]]; // @[package.scala:163:13]
wire [3:0][41:0] _GEN_9 = {{tlb_sectored_entries_0_2_data_3}, {tlb_sectored_entries_0_2_data_2}, {tlb_sectored_entries_0_2_data_1}, {tlb_sectored_entries_0_2_data_0}}; // @[TLB.scala:170:77, :339:29]
wire [41:0] tlb__entries_WIRE_5 = _GEN_9[s1_tlb_req_vaddr[13:12]]; // @[package.scala:163:13]
wire [3:0][41:0] _GEN_10 = {{tlb_sectored_entries_0_3_data_3}, {tlb_sectored_entries_0_3_data_2}, {tlb_sectored_entries_0_3_data_1}, {tlb_sectored_entries_0_3_data_0}}; // @[TLB.scala:170:77, :339:29]
wire [41:0] tlb__entries_WIRE_7 = _GEN_10[s1_tlb_req_vaddr[13:12]]; // @[package.scala:163:13]
wire [3:0][41:0] _GEN_11 = {{tlb_sectored_entries_0_4_data_3}, {tlb_sectored_entries_0_4_data_2}, {tlb_sectored_entries_0_4_data_1}, {tlb_sectored_entries_0_4_data_0}}; // @[TLB.scala:170:77, :339:29]
wire [41:0] tlb__entries_WIRE_9 = _GEN_11[s1_tlb_req_vaddr[13:12]]; // @[package.scala:163:13]
wire [3:0][41:0] _GEN_12 = {{tlb_sectored_entries_0_5_data_3}, {tlb_sectored_entries_0_5_data_2}, {tlb_sectored_entries_0_5_data_1}, {tlb_sectored_entries_0_5_data_0}}; // @[TLB.scala:170:77, :339:29]
wire [41:0] tlb__entries_WIRE_11 = _GEN_12[s1_tlb_req_vaddr[13:12]]; // @[package.scala:163:13]
wire [3:0][41:0] _GEN_13 = {{tlb_sectored_entries_0_6_data_3}, {tlb_sectored_entries_0_6_data_2}, {tlb_sectored_entries_0_6_data_1}, {tlb_sectored_entries_0_6_data_0}}; // @[TLB.scala:170:77, :339:29]
wire [41:0] tlb__entries_WIRE_13 = _GEN_13[s1_tlb_req_vaddr[13:12]]; // @[package.scala:163:13]
wire [3:0][41:0] _GEN_14 = {{tlb_sectored_entries_0_7_data_3}, {tlb_sectored_entries_0_7_data_2}, {tlb_sectored_entries_0_7_data_1}, {tlb_sectored_entries_0_7_data_0}}; // @[TLB.scala:170:77, :339:29]
wire [41:0] tlb__entries_WIRE_15 = _GEN_14[s1_tlb_req_vaddr[13:12]]; // @[package.scala:163:13]
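  // Physical page number: one-hot OR-mux over all hitting entries; superpage and
  // special hits splice request VPN bits into the untranslated low PPN bits, and
  // with translation disabled the PPN is simply vaddr[31:12].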
wire [19:0] tlb_ppn = (tlb_hitsVec_0 ? _tlb_entries_barrier_io_y_ppn : 20'h0) | (tlb_hitsVec_1 ? _tlb_entries_barrier_1_io_y_ppn : 20'h0) | (tlb_hitsVec_2 ? _tlb_entries_barrier_2_io_y_ppn : 20'h0) | (tlb_hitsVec_3 ? _tlb_entries_barrier_3_io_y_ppn : 20'h0) | (tlb_hitsVec_4 ? _tlb_entries_barrier_4_io_y_ppn : 20'h0) | (tlb_hitsVec_5 ? _tlb_entries_barrier_5_io_y_ppn : 20'h0) | (tlb_hitsVec_6 ? _tlb_entries_barrier_6_io_y_ppn : 20'h0) | (tlb_hitsVec_7 ? _tlb_entries_barrier_7_io_y_ppn : 20'h0) | (tlb_hitsVec_8 ? {_tlb_entries_barrier_8_io_y_ppn[19:18], (tlb_ignore_1 ? s1_tlb_req_vaddr[29:21] : 9'h0) | _tlb_entries_barrier_8_io_y_ppn[17:9], s1_tlb_req_vaddr[20:12] | _tlb_entries_barrier_8_io_y_ppn[8:0]} : 20'h0) | (tlb_hitsVec_9 ? {_tlb_entries_barrier_9_io_y_ppn[19:18], (tlb_ignore_4 ? s1_tlb_req_vaddr[29:21] : 9'h0) | _tlb_entries_barrier_9_io_y_ppn[17:9], s1_tlb_req_vaddr[20:12] | _tlb_entries_barrier_9_io_y_ppn[8:0]} : 20'h0) | (tlb_hitsVec_10 ? {_tlb_entries_barrier_10_io_y_ppn[19:18], (tlb_ignore_7 ? s1_tlb_req_vaddr[29:21] : 9'h0) | _tlb_entries_barrier_10_io_y_ppn[17:9], s1_tlb_req_vaddr[20:12] | _tlb_entries_barrier_10_io_y_ppn[8:0]} : 20'h0) | (tlb_hitsVec_11 ? {_tlb_entries_barrier_11_io_y_ppn[19:18], (tlb_ignore_10 ? s1_tlb_req_vaddr[29:21] : 9'h0) | _tlb_entries_barrier_11_io_y_ppn[17:9], s1_tlb_req_vaddr[20:12] | _tlb_entries_barrier_11_io_y_ppn[8:0]} : 20'h0) | (tlb_hitsVec_12 ? {_tlb_entries_barrier_12_io_y_ppn[19:18], (tlb_ignore_13 ? s1_tlb_req_vaddr[29:21] : 9'h0) | _tlb_entries_barrier_12_io_y_ppn[17:9], (tlb_special_entry_level[1] ? 9'h0 : s1_tlb_req_vaddr[20:12]) | _tlb_entries_barrier_12_io_y_ppn[8:0]} : 20'h0) | (tlb_vm_enabled ? 20'h0 : s1_tlb_req_vaddr[31:12]); // @[Mux.scala:30:73]
wire tlb_bad_va = tlb_vm_enabled & ~(s1_tlb_req_vaddr[39:38] == 2'h0 | (&(s1_tlb_req_vaddr[39:38]))); // @[TLB.scala:399:{45,61}, :559:43, :560:{37,51,59,86}, :568:34]
wire tlb_tlb_miss = tlb_vm_enabled & ~tlb_bad_va & tlb_real_hits == 13'h0; // @[package.scala:45:27]
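  // Replacement and sanity state: tlb_state_vec_0 holds the pseudo-LRU bits for
  // the eight sectored ways and tlb_state_reg_1 those for the superpage entries
  // (Replacement.scala); tlb_multipleHits detects more than one simultaneous hit.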
reg [6:0] tlb_state_vec_0; // @[Replacement.scala:305:17]
reg [2:0] tlb_state_reg_1; // @[Replacement.scala:168:70]
wire tlb_multipleHits_rightOne_1 = tlb_hitsVec_1 | tlb_hitsVec_2; // @[Misc.scala:183:16]
wire tlb_multipleHits_leftOne_2 = tlb_hitsVec_0 | tlb_multipleHits_rightOne_1; // @[Misc.scala:183:16]
wire tlb_multipleHits_rightOne_3 = tlb_hitsVec_4 | tlb_hitsVec_5; // @[Misc.scala:183:16]
wire tlb_multipleHits_rightOne_4 = tlb_hitsVec_3 | tlb_multipleHits_rightOne_3; // @[Misc.scala:183:16]
wire tlb_multipleHits_rightOne_6 = tlb_hitsVec_7 | tlb_hitsVec_8; // @[Misc.scala:183:16]
wire tlb_multipleHits_leftOne_8 = tlb_hitsVec_6 | tlb_multipleHits_rightOne_6; // @[Misc.scala:183:16]
wire tlb_multipleHits_leftOne_10 = tlb_hitsVec_9 | tlb_hitsVec_10; // @[Misc.scala:183:16]
wire tlb_multipleHits_rightOne_9 = tlb_hitsVec_11 | tlb_hitsVec_12; // @[Misc.scala:183:16]
wire tlb_multipleHits_rightOne_10 = tlb_multipleHits_leftOne_10 | tlb_multipleHits_rightOne_9; // @[Misc.scala:183:16]
wire tlb_multipleHits = tlb_hitsVec_1 & tlb_hitsVec_2 | tlb_hitsVec_0 & tlb_multipleHits_rightOne_1 | tlb_hitsVec_4 & tlb_hitsVec_5 | tlb_hitsVec_3 & tlb_multipleHits_rightOne_3 | tlb_multipleHits_leftOne_2 & tlb_multipleHits_rightOne_4 | tlb_hitsVec_7 & tlb_hitsVec_8 | tlb_hitsVec_6 & tlb_multipleHits_rightOne_6 | tlb_hitsVec_9 & tlb_hitsVec_10 | tlb_hitsVec_11 & tlb_hitsVec_12 | tlb_multipleHits_leftOne_10 & tlb_multipleHits_rightOne_9 | tlb_multipleHits_leftOne_8 & tlb_multipleHits_rightOne_10 | (tlb_multipleHits_leftOne_2 | tlb_multipleHits_rightOne_4) & (tlb_multipleHits_leftOne_8 | tlb_multipleHits_rightOne_10); // @[Misc.scala:183:{16,37,49,61}]
wire tlb_io_req_ready = tlb_state == 2'h0; // @[TLB.scala:352:22, :631:25]
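  // Metadata/data array arbitration: fixed-priority valid/grant chains
  // (Arbiter.scala) over the metaArb and dataArb request ports; the CPU request
  // is the lowest-priority input on both arbiters.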
wire _GEN_15 = metaArb_io_in_2_valid | metaArb_io_in_3_valid; // @[Arbiter.scala:145:26, :147:19]
wire metaArb_io_out_bits_write = resetting | metaArb_io_in_2_valid | metaArb_io_in_3_valid | metaArb_io_in_4_valid; // @[Arbiter.scala:145:26, :147:19]
wire metaArb__grant_T_2 = resetting | metaArb_io_in_2_valid | metaArb_io_in_3_valid; // @[Arbiter.scala:45:68]
wire metaArb__grant_T_3 = metaArb__grant_T_2 | metaArb_io_in_4_valid; // @[Arbiter.scala:45:68]
wire metaArb__grant_T_5 = metaArb__grant_T_3 | metaArb_io_in_6_valid; // @[Arbiter.scala:45:68]
wire metaArb_io_out_valid = metaArb__grant_T_5 | io_cpu_req_valid; // @[Arbiter.scala:45:68, :154:31]
wire dataArb__grant_T = dataArb_io_in_0_valid | dataArb_io_in_1_valid; // @[Arbiter.scala:45:68]
wire dataArb__grant_T_1 = dataArb__grant_T | dataArb_io_in_2_valid; // @[Arbiter.scala:45:68]
wire dataArb_io_out_valid = dataArb__grant_T_1 | dataArb_io_in_3_valid; // @[Arbiter.scala:45:68, :154:31]
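  // Stage-1 (s1) pipeline registers: latched CPU/probe request fields, TLB
  // request fields, and the decoded read/write/sfence classification of the s1
  // command.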
reg s1_valid; // @[DCache.scala:182:25]
reg s1_probe; // @[DCache.scala:183:25]
reg [1:0] probe_bits_param; // @[DCache.scala:184:29]
reg [3:0] probe_bits_size; // @[DCache.scala:184:29]
reg probe_bits_source; // @[DCache.scala:184:29]
reg [31:0] probe_bits_address; // @[DCache.scala:184:29]
wire s1_valid_masked = s1_valid & ~io_cpu_s1_kill; // @[DCache.scala:182:25, :186:{34,37}]
reg [39:0] s1_vaddr; // @[DCache.scala:196:25]
reg [6:0] s1_req_tag; // @[DCache.scala:196:25]
reg [4:0] s1_req_cmd; // @[DCache.scala:196:25]
reg [1:0] s1_req_size; // @[DCache.scala:196:25]
reg s1_req_signed; // @[DCache.scala:196:25]
reg [1:0] s1_req_dprv; // @[DCache.scala:196:25]
reg s1_req_dv; // @[DCache.scala:196:25]
reg [1:0] s1_tlb_req_size; // @[DCache.scala:208:29]
reg [4:0] s1_tlb_req_cmd; // @[DCache.scala:208:29]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_1 = s1_req_cmd == 5'h0; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_2 = s1_req_cmd == 5'h10; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_3 = s1_req_cmd == 5'h6; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_29 = s1_req_cmd == 5'h7; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_31 = s1_req_cmd == 5'h4; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_32 = s1_req_cmd == 5'h9; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_33 = s1_req_cmd == 5'hA; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_34 = s1_req_cmd == 5'hB; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_38 = s1_req_cmd == 5'h8; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_39 = s1_req_cmd == 5'hC; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_40 = s1_req_cmd == 5'hD; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_41 = s1_req_cmd == 5'hE; // @[package.scala:16:47]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_42 = s1_req_cmd == 5'hF; // @[package.scala:16:47]
wire s1_read = _io_cpu_perf_canAcceptLoadThenLoad_T_1 | _io_cpu_perf_canAcceptLoadThenLoad_T_2 | _io_cpu_perf_canAcceptLoadThenLoad_T_3 | _io_cpu_perf_canAcceptLoadThenLoad_T_29 | _io_cpu_perf_canAcceptLoadThenLoad_T_31 | _io_cpu_perf_canAcceptLoadThenLoad_T_32 | _io_cpu_perf_canAcceptLoadThenLoad_T_33 | _io_cpu_perf_canAcceptLoadThenLoad_T_34 | _io_cpu_perf_canAcceptLoadThenLoad_T_38 | _io_cpu_perf_canAcceptLoadThenLoad_T_39 | _io_cpu_perf_canAcceptLoadThenLoad_T_40 | _io_cpu_perf_canAcceptLoadThenLoad_T_41 | _io_cpu_perf_canAcceptLoadThenLoad_T_42; // @[package.scala:16:47, :81:59]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_26 = s1_req_cmd == 5'h1; // @[DCache.scala:196:25]
wire _io_cpu_perf_canAcceptLoadThenLoad_T_51 = s1_req_cmd == 5'h11; // @[DCache.scala:196:25]
wire s1_write = _io_cpu_perf_canAcceptLoadThenLoad_T_26 | _io_cpu_perf_canAcceptLoadThenLoad_T_51 | _io_cpu_perf_canAcceptLoadThenLoad_T_29 | _io_cpu_perf_canAcceptLoadThenLoad_T_31 | _io_cpu_perf_canAcceptLoadThenLoad_T_32 | _io_cpu_perf_canAcceptLoadThenLoad_T_33 | _io_cpu_perf_canAcceptLoadThenLoad_T_34 | _io_cpu_perf_canAcceptLoadThenLoad_T_38 | _io_cpu_perf_canAcceptLoadThenLoad_T_39 | _io_cpu_perf_canAcceptLoadThenLoad_T_40 | _io_cpu_perf_canAcceptLoadThenLoad_T_41 | _io_cpu_perf_canAcceptLoadThenLoad_T_42; // @[package.scala:16:47, :81:59]
wire s1_sfence = s1_req_cmd == 5'h14 | s1_req_cmd == 5'h15 | s1_req_cmd == 5'h16; // @[DCache.scala:196:25, :213:{30,43,57,71,85}]
reg s1_flush_valid; // @[DCache.scala:215:27]
reg cached_grant_wait; // @[DCache.scala:223:34]
reg [8:0] flushCounter; // @[DCache.scala:225:29]
reg release_ack_wait; // @[DCache.scala:226:33]
reg [31:0] release_ack_addr; // @[DCache.scala:227:29]
reg [3:0] release_state; // @[DCache.scala:228:30]
wire _canAcceptCachedGrant_T = release_state == 4'h1; // @[package.scala:16:47]
wire _inWriteback_T_1 = release_state == 4'h2; // @[package.scala:16:47]
wire inWriteback = _canAcceptCachedGrant_T | _inWriteback_T_1; // @[package.scala:16:47, :81:59]
wire _io_cpu_req_ready_T_4 = release_state == 4'h0 & ~cached_grant_wait & ~s1_nack; // @[DCache.scala:187:41, :223:34, :228:30, :233:{38,51,54,73}, :571:36, :824:21, :839:24]
reg uncachedInFlight_0; // @[DCache.scala:236:33]
reg [39:0] uncachedReqs_0_addr; // @[DCache.scala:237:25]
reg [6:0] uncachedReqs_0_tag; // @[DCache.scala:237:25]
reg [1:0] uncachedReqs_0_size; // @[DCache.scala:237:25]
reg uncachedReqs_0_signed; // @[DCache.scala:237:25]
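  // Incoming-command decode (opcodes per rocket's Consts.scala): the terms below
  // classify io_cpu_req_bits_cmd as read, write, or write-prefetch; writes and
  // write-prefetches skip the data-array read request on dataArb port 3.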
wire _pstore_drain_opportunistic_T = io_cpu_req_bits_cmd == 5'h0; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_1 = io_cpu_req_bits_cmd == 5'h10; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_2 = io_cpu_req_bits_cmd == 5'h6; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_28 = io_cpu_req_bits_cmd == 5'h7; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_30 = io_cpu_req_bits_cmd == 5'h4; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_31 = io_cpu_req_bits_cmd == 5'h9; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_32 = io_cpu_req_bits_cmd == 5'hA; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_33 = io_cpu_req_bits_cmd == 5'hB; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_37 = io_cpu_req_bits_cmd == 5'h8; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_38 = io_cpu_req_bits_cmd == 5'hC; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_39 = io_cpu_req_bits_cmd == 5'hD; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_40 = io_cpu_req_bits_cmd == 5'hE; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_41 = io_cpu_req_bits_cmd == 5'hF; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_T_25 = io_cpu_req_bits_cmd == 5'h1; // @[package.scala:16:47]
wire _pstore_drain_opportunistic_res_T_1 = io_cpu_req_bits_cmd == 5'h3; // @[package.scala:16:47]
wire _dataArb_io_in_3_valid_res_T_2 = _pstore_drain_opportunistic_T_25 | _pstore_drain_opportunistic_res_T_1; // @[package.scala:16:47, :81:59]
wire _pstore_drain_opportunistic_T_50 = io_cpu_req_bits_cmd == 5'h11; // @[Consts.scala:90:49]
assign dataArb_io_in_3_valid = io_cpu_req_valid & ~_dataArb_io_in_3_valid_res_T_2; // @[package.scala:81:59]
assign dataArb_io_in_3_bits_addr = io_cpu_req_bits_addr[11:0]; // @[DCache.scala:245:30]
wire _GEN_16 = dataArb__grant_T_1 & (_pstore_drain_opportunistic_T | _pstore_drain_opportunistic_T_1 | _pstore_drain_opportunistic_T_2 | _pstore_drain_opportunistic_T_28 | _pstore_drain_opportunistic_T_30 | _pstore_drain_opportunistic_T_31 | _pstore_drain_opportunistic_T_32 | _pstore_drain_opportunistic_T_33 | _pstore_drain_opportunistic_T_37 | _pstore_drain_opportunistic_T_38 | _pstore_drain_opportunistic_T_39 | _pstore_drain_opportunistic_T_40 | _pstore_drain_opportunistic_T_41); // @[Arbiter.scala:45:68]
reg s1_did_read; // @[DCache.scala:259:30]
assign metaArb_io_in_7_bits_idx = io_cpu_req_bits_addr[11:6]; // @[DCache.scala:263:58]
wire s1_cmd_uses_tlb = s1_read | s1_write | s1_req_cmd == 5'h5 & s1_req_size[0] | s1_req_cmd == 5'h17; // @[package.scala:81:59]
wire tlb_io_req_valid = s1_valid & ~io_cpu_s1_kill & s1_cmd_uses_tlb; // @[DCache.scala:182:25, :186:37, :212:30, :270:{38,55}, :273:{52,71}]
wire _GEN_17 = ~tlb_io_req_ready & ~io_ptw_resp_valid & ~io_cpu_req_bits_phys; // @[TLB.scala:631:25]
wire _GEN_18 = s1_valid & s1_cmd_uses_tlb & (io_ptw_resp_valid | tlb_tlb_miss | tlb_multipleHits); // @[Misc.scala:183:{37,49}]
wire tlb_io_sfence_valid = s1_valid & ~io_cpu_s1_kill & s1_sfence; // @[DCache.scala:182:25, :186:37, :213:{43,71}, :278:{35,54}]
assign rockettile_dcache_tag_array_MPORT_en = metaArb_io_out_valid & metaArb_io_out_bits_write; // @[Arbiter.scala:145:26, :147:19, :154:31]
assign rockettile_dcache_tag_array_s1_meta_en = metaArb_io_out_valid & ~metaArb_io_out_bits_write; // @[Arbiter.scala:145:26, :147:19, :154:31]
wire _s1_meta_hit_state_T = _rockettile_dcache_tag_array_RW0_rdata[19:0] == tlb_ppn; // @[Mux.scala:30:73]
wire _s1_meta_hit_state_T_4 = _rockettile_dcache_tag_array_RW0_rdata[41:22] == tlb_ppn; // @[Mux.scala:30:73]
wire _s1_meta_hit_state_T_8 = _rockettile_dcache_tag_array_RW0_rdata[63:44] == tlb_ppn; // @[Mux.scala:30:73]
wire _s1_meta_hit_state_T_12 = _rockettile_dcache_tag_array_RW0_rdata[85:66] == tlb_ppn; // @[Mux.scala:30:73]
wire _s1_meta_hit_state_T_16 = _rockettile_dcache_tag_array_RW0_rdata[107:88] == tlb_ppn; // @[Mux.scala:30:73]
wire _s1_meta_hit_state_T_20 = _rockettile_dcache_tag_array_RW0_rdata[129:110] == tlb_ppn; // @[Mux.scala:30:73]
wire _s1_meta_hit_state_T_24 = _rockettile_dcache_tag_array_RW0_rdata[151:132] == tlb_ppn; // @[Mux.scala:30:73]
wire _s1_meta_hit_state_T_28 = _rockettile_dcache_tag_array_RW0_rdata[173:154] == tlb_ppn; // @[Mux.scala:30:73]
wire [7:0] s1_hit_way = {(|(_rockettile_dcache_tag_array_RW0_rdata[175:174])) & _s1_meta_hit_state_T_28, (|(_rockettile_dcache_tag_array_RW0_rdata[153:152])) & _s1_meta_hit_state_T_24, (|(_rockettile_dcache_tag_array_RW0_rdata[131:130])) & _s1_meta_hit_state_T_20, (|(_rockettile_dcache_tag_array_RW0_rdata[109:108])) & _s1_meta_hit_state_T_16, (|(_rockettile_dcache_tag_array_RW0_rdata[87:86])) & _s1_meta_hit_state_T_12, (|(_rockettile_dcache_tag_array_RW0_rdata[65:64])) & _s1_meta_hit_state_T_8, (|(_rockettile_dcache_tag_array_RW0_rdata[43:42])) & _s1_meta_hit_state_T_4, (|(_rockettile_dcache_tag_array_RW0_rdata[21:20])) & _s1_meta_hit_state_T}; // @[package.scala:45:27]
wire [1:0] _s1_mask_xwr_T = {s1_vaddr[0] | (|s1_req_size), ~(s1_vaddr[0])}; // @[DCache.scala:196:25]
wire [3:0] _s1_mask_xwr_T_1 = {(s1_vaddr[1] ? _s1_mask_xwr_T : 2'h0) | {2{s1_req_size[1]}}, s1_vaddr[1] ? 2'h0 : _s1_mask_xwr_T}; // @[DCache.scala:196:25]
wire [7:0] s1_mask_xwr = {(s1_vaddr[2] ? _s1_mask_xwr_T_1 : 4'h0) | {4{&s1_req_size}}, s1_vaddr[2] ? 4'h0 : _s1_mask_xwr_T_1}; // @[DCache.scala:196:25]
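  // Stage-2 (s2) pipeline registers: replicated request fields, latched TLB
  // exception causes, PMA cacheability, corrected tag metadata, and the hit and
  // victim way information computed in s1.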
reg s2_valid; // @[DCache.scala:331:25]
wire s2_valid_no_xcpt = s2_valid & {io_cpu_s2_xcpt_ma_ld_0, io_cpu_s2_xcpt_ma_st_0, io_cpu_s2_xcpt_pf_ld_0, io_cpu_s2_xcpt_pf_st_0, io_cpu_s2_xcpt_ae_ld_0, io_cpu_s2_xcpt_ae_st_0} == 6'h0; // @[DCache.scala:331:25, :332:{35,54,61}, :933:24]
reg s2_probe; // @[DCache.scala:333:25]
wire [4:0] _GEN_19 = {s1_probe | s2_probe, release_state}; // @[DCache.scala:183:25, :228:30, :333:25, :334:{34,46,63}]
reg s2_not_nacked_in_s1; // @[DCache.scala:335:36]
wire s2_valid_masked = s2_valid_no_xcpt & s2_not_nacked_in_s1; // @[DCache.scala:332:35, :335:36, :337:42]
reg [6:0] s2_req_tag; // @[DCache.scala:339:19]
reg [4:0] s2_req_cmd; // @[DCache.scala:339:19]
reg [1:0] s2_req_size; // @[DCache.scala:339:19]
reg s2_req_signed; // @[DCache.scala:339:19]
reg [1:0] s2_req_dprv; // @[DCache.scala:339:19]
reg s2_req_dv; // @[DCache.scala:339:19]
reg s2_tlb_xcpt_pf_ld; // @[DCache.scala:342:24]
reg s2_tlb_xcpt_pf_st; // @[DCache.scala:342:24]
reg s2_tlb_xcpt_ae_ld; // @[DCache.scala:342:24]
reg s2_tlb_xcpt_ae_st; // @[DCache.scala:342:24]
reg s2_tlb_xcpt_ma_ld; // @[DCache.scala:342:24]
reg s2_tlb_xcpt_ma_st; // @[DCache.scala:342:24]
reg s2_pma_cacheable; // @[DCache.scala:343:19]
reg [39:0] s2_uncached_resp_addr; // @[DCache.scala:344:34]
reg [39:0] s2_vaddr_r; // @[DCache.scala:351:31]
wire s2_lr = s2_req_cmd == 5'h6; // @[package.scala:16:47]
wire s2_sc = s2_req_cmd == 5'h7; // @[package.scala:16:47]
wire _io_cpu_store_pending_T_5 = s2_req_cmd == 5'h4; // @[package.scala:16:47]
wire _io_cpu_store_pending_T_6 = s2_req_cmd == 5'h9; // @[package.scala:16:47]
wire _io_cpu_store_pending_T_7 = s2_req_cmd == 5'hA; // @[package.scala:16:47]
wire _io_cpu_store_pending_T_8 = s2_req_cmd == 5'hB; // @[package.scala:16:47]
wire _io_cpu_store_pending_T_12 = s2_req_cmd == 5'h8; // @[package.scala:16:47]
wire _io_cpu_store_pending_T_13 = s2_req_cmd == 5'hC; // @[package.scala:16:47]
wire _io_cpu_store_pending_T_14 = s2_req_cmd == 5'hD; // @[package.scala:16:47]
wire _io_cpu_store_pending_T_15 = s2_req_cmd == 5'hE; // @[package.scala:16:47]
wire _io_cpu_store_pending_T_16 = s2_req_cmd == 5'hF; // @[package.scala:16:47]
wire s2_read = s2_req_cmd == 5'h0 | s2_req_cmd == 5'h10 | s2_lr | s2_sc | _io_cpu_store_pending_T_5 | _io_cpu_store_pending_T_6 | _io_cpu_store_pending_T_7 | _io_cpu_store_pending_T_8 | _io_cpu_store_pending_T_12 | _io_cpu_store_pending_T_13 | _io_cpu_store_pending_T_14 | _io_cpu_store_pending_T_15 | _io_cpu_store_pending_T_16; // @[package.scala:16:47, :81:59]
wire _io_cpu_store_pending_T = s2_req_cmd == 5'h1; // @[DCache.scala:339:19]
wire _io_cpu_store_pending_T_1 = s2_req_cmd == 5'h11; // @[DCache.scala:339:19]
wire s2_write = _io_cpu_store_pending_T | _io_cpu_store_pending_T_1 | s2_sc | _io_cpu_store_pending_T_5 | _io_cpu_store_pending_T_6 | _io_cpu_store_pending_T_7 | _io_cpu_store_pending_T_8 | _io_cpu_store_pending_T_12 | _io_cpu_store_pending_T_13 | _io_cpu_store_pending_T_14 | _io_cpu_store_pending_T_15 | _io_cpu_store_pending_T_16; // @[package.scala:16:47, :81:59]
wire s2_readwrite = s2_read | s2_write; // @[package.scala:81:59]
reg s2_flush_valid_pre_tag_ecc; // @[DCache.scala:355:43]
reg [21:0] s2_meta_corrected_r; // @[DCache.scala:361:61]
reg [21:0] s2_meta_corrected_r_1; // @[DCache.scala:361:61]
reg [21:0] s2_meta_corrected_r_2; // @[DCache.scala:361:61]
reg [21:0] s2_meta_corrected_r_3; // @[DCache.scala:361:61]
reg [21:0] s2_meta_corrected_r_4; // @[DCache.scala:361:61]
reg [21:0] s2_meta_corrected_r_5; // @[DCache.scala:361:61]
reg [21:0] s2_meta_corrected_r_6; // @[DCache.scala:361:61]
reg [21:0] s2_meta_corrected_r_7; // @[DCache.scala:361:61]
reg [63:0] s2_data; // @[DCache.scala:379:18]
reg [7:0] s2_probe_way; // @[DCache.scala:383:31]
reg [1:0] s2_probe_state_state; // @[DCache.scala:384:33]
reg [7:0] s2_hit_way; // @[DCache.scala:385:29]
reg [1:0] s2_hit_state_state; // @[DCache.scala:386:31]
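  // Coherence-permission check (Misc.scala helpers): _r_T pairs the s2 access
  // intent (write / write-intent / read) with the line's current client state;
  // s2_hit is asserted only for encodings where that state already grants the
  // required permission, otherwise a miss or upgrade is needed.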
wire _metaArb_io_in_3_bits_data_c_cat_T_46 = s2_req_cmd == 5'h3; // @[DCache.scala:339:19]
wire [3:0] _r_T = {_io_cpu_store_pending_T | _io_cpu_store_pending_T_1 | s2_sc | _io_cpu_store_pending_T_5 | _io_cpu_store_pending_T_6 | _io_cpu_store_pending_T_7 | _io_cpu_store_pending_T_8 | _io_cpu_store_pending_T_12 | _io_cpu_store_pending_T_13 | _io_cpu_store_pending_T_14 | _io_cpu_store_pending_T_15 | _io_cpu_store_pending_T_16, _io_cpu_store_pending_T | _io_cpu_store_pending_T_1 | s2_sc | _io_cpu_store_pending_T_5 | _io_cpu_store_pending_T_6 | _io_cpu_store_pending_T_7 | _io_cpu_store_pending_T_8 | _io_cpu_store_pending_T_12 | _io_cpu_store_pending_T_13 | _io_cpu_store_pending_T_14 | _io_cpu_store_pending_T_15 | _io_cpu_store_pending_T_16 | _metaArb_io_in_3_bits_data_c_cat_T_46 | s2_lr, s2_hit_state_state}; // @[package.scala:16:47, :81:59]
wire [1:0] _r_T_27 = {1'h0, _r_T == 4'hC}; // @[Misc.scala:35:36, :49:20]
wire s2_hit = _r_T == 4'h3 | _r_T == 4'h2 | _r_T == 4'h1 | _r_T == 4'h7 | _r_T == 4'h6 | (&_r_T) | _r_T == 4'hE; // @[Misc.scala:35:9, :49:20]
wire [15:0][1:0] _GEN_20 = {{2'h3}, {2'h3}, {2'h2}, {_r_T_27}, {_r_T_27}, {_r_T_27}, {_r_T_27}, {_r_T_27}, {2'h3}, {2'h2}, {2'h2}, {2'h1}, {2'h3}, {2'h2}, {2'h1}, {2'h0}}; // @[Misc.scala:35:36, :49:20]
wire s2_valid_hit_maybe_flush_pre_data_ecc_and_waw = s2_valid_masked & s2_hit; // @[Misc.scala:35:9]
wire s2_valid_hit_pre_data_ecc_and_waw = s2_valid_hit_maybe_flush_pre_data_ecc_and_waw & s2_readwrite; // @[DCache.scala:354:30, :397:89, :418:89]
wire s2_valid_flush_line = s2_valid_hit_maybe_flush_pre_data_ecc_and_waw & s2_req_cmd == 5'h5 & s2_req_size[0]; // @[DCache.scala:339:19, :340:{37,68}, :341:54, :397:89, :419:75]
wire s2_valid_miss = s2_valid_masked & s2_readwrite & ~s2_hit; // @[Misc.scala:35:9]
wire s2_valid_cached_miss = s2_valid_miss & s2_pma_cacheable & ~uncachedInFlight_0; // @[DCache.scala:236:33, :343:19, :423:{39,73}, :425:{44,60,63}]
wire s2_want_victimize = s2_valid_cached_miss | s2_valid_flush_line | s2_flush_valid_pre_tag_ecc; // @[DCache.scala:341:54, :355:43, :419:75, :425:60, :427:{77,123}]
wire s2_valid_uncached_pending = s2_valid_miss & ~s2_pma_cacheable & ~uncachedInFlight_0; // @[DCache.scala:236:33, :343:19, :423:{39,73}, :424:21, :430:{49,64,67}]
reg [2:0] s2_victim_way_r; // @[DCache.scala:431:41]
assign s2_victim_or_hit_way = (|s2_hit_state_state) ? s2_hit_way : 8'h1 << s2_victim_way_r; // @[OneHot.scala:58:35]
wire _s2_victim_state_T = s2_victim_way_r == 3'h0; // @[Mux.scala:32:36]
wire _s2_victim_state_T_1 = s2_victim_way_r == 3'h1; // @[Mux.scala:32:36]
wire _s2_victim_state_T_2 = s2_victim_way_r == 3'h2; // @[Mux.scala:32:36]
wire _s2_victim_state_T_3 = s2_victim_way_r == 3'h3; // @[Mux.scala:32:36]
wire _s2_victim_state_T_4 = s2_victim_way_r == 3'h4; // @[Mux.scala:32:36]
wire _s2_victim_state_T_5 = s2_victim_way_r == 3'h5; // @[Mux.scala:32:36]
wire _s2_victim_state_T_6 = s2_victim_way_r == 3'h6; // @[Mux.scala:32:36]
wire [1:0] s2_victim_state_state = (|s2_hit_state_state) ? s2_hit_state_state : (_s2_victim_state_T ? s2_meta_corrected_r[21:20] : 2'h0) | (_s2_victim_state_T_1 ? s2_meta_corrected_r_1[21:20] : 2'h0) | (_s2_victim_state_T_2 ? s2_meta_corrected_r_2[21:20] : 2'h0) | (_s2_victim_state_T_3 ? s2_meta_corrected_r_3[21:20] : 2'h0) | (_s2_victim_state_T_4 ? s2_meta_corrected_r_4[21:20] : 2'h0) | (_s2_victim_state_T_5 ? s2_meta_corrected_r_5[21:20] : 2'h0) | (_s2_victim_state_T_6 ? s2_meta_corrected_r_6[21:20] : 2'h0) | ((&s2_victim_way_r) ? s2_meta_corrected_r_7[21:20] : 2'h0); // @[Mux.scala:30:73, :32:36]
wire [3:0] _r_T_59 = {probe_bits_param, s2_probe_state_state}; // @[Metadata.scala:120:19]
wire _r_T_85 = _r_T_59 == 4'hB; // @[Misc.scala:56:20]
wire _r_T_88 = _r_T_59 == 4'h4; // @[Misc.scala:56:20]
wire _r_T_92 = _r_T_59 == 4'h5; // @[Misc.scala:56:20]
wire _r_T_96 = _r_T_59 == 4'h6; // @[Misc.scala:56:20]
wire _r_T_100 = _r_T_59 == 4'h7; // @[Misc.scala:56:20]
wire _r_T_104 = _r_T_59 == 4'h0; // @[Misc.scala:56:20]
wire _r_T_108 = _r_T_59 == 4'h1; // @[Misc.scala:56:20]
wire _r_T_112 = _r_T_59 == 4'h2; // @[Misc.scala:56:20]
wire _r_T_116 = _r_T_59 == 4'h3; // @[Misc.scala:56:20]
wire s2_prb_ack_data = _r_T_116 | ~(_r_T_112 | _r_T_108 | _r_T_104) & (_r_T_100 | ~(_r_T_96 | _r_T_92 | _r_T_88) & _r_T_85); // @[Misc.scala:38:9, :56:20]
wire _GEN_21 = _r_T_116 | _r_T_112; // @[Misc.scala:38:36, :56:20]
wire s2_victim_dirty = &s2_victim_state_state; // @[Misc.scala:38:9, :56:20]
wire io_cpu_s2_nack_0 = s2_valid_no_xcpt & ~(s2_valid_uncached_pending & auto_out_a_ready) & ~(s2_valid_masked & s2_req_cmd == 5'h17) & ~s2_valid_hit_pre_data_ecc_and_waw; // @[DCache.scala:332:35, :337:42, :339:19, :418:89, :430:{49,64}, :440:57, :441:61, :444:17, :445:{38,41,64,67,86,89}]
assign metaArb_io_in_2_valid = s2_valid_hit_pre_data_ecc_and_waw & s2_hit_state_state != _GEN_20[_r_T]; // @[Misc.scala:35:36, :49:20]
wire _GEN_22 = io_cpu_s2_nack_0 | metaArb_io_in_2_valid; // @[DCache.scala:445:{38,64,86}, :446:{24,62}]
assign metaArb_io_in_4_bits_idx = probe_bits_address[11:6]; // @[DCache.scala:184:29, :1200:47]
assign metaArb_io_in_3_bits_idx = s2_req_addr[11:6]; // @[DCache.scala:339:19, :453:76]
assign metaArb_io_in_2_bits_data = {_GEN_20[_r_T], s2_req_addr[31:12]}; // @[Misc.scala:35:36, :49:20]
reg [6:0] lrscCount; // @[DCache.scala:472:26]
reg [33:0] lrscAddr; // @[DCache.scala:475:21]
wire s2_sc_fail = s2_sc & ~((|(lrscCount[6:2])) & lrscAddr == s2_req_addr[39:6]); // @[package.scala:16:47]
reg [4:0] pstore1_cmd; // @[DCache.scala:492:30]
reg [39:0] pstore1_addr; // @[DCache.scala:493:31]
reg [63:0] pstore1_data; // @[DCache.scala:494:31]
reg [7:0] pstore1_way; // @[DCache.scala:495:30]
reg [7:0] pstore1_mask; // @[DCache.scala:496:31]
reg pstore1_rmw; // @[DCache.scala:498:44]
wire _pstore1_held_T = s2_valid_hit_pre_data_ecc_and_waw & s2_write; // @[package.scala:81:59]
reg pstore2_valid; // @[DCache.scala:501:30]
wire _pstore_drain_opportunistic_res_T_2 = _pstore_drain_opportunistic_T_25 | _pstore_drain_opportunistic_res_T_1; // @[package.scala:16:47, :81:59]
reg pstore_drain_on_miss_REG; // @[DCache.scala:503:56]
reg pstore1_held; // @[DCache.scala:504:29]
wire pstore1_valid_likely = s2_valid & s2_write | pstore1_held; // @[package.scala:81:59]
wire pstore1_valid = _pstore1_held_T & ~s2_sc_fail | pstore1_held; // @[DCache.scala:477:26, :490:{46,58,61}, :504:29, :507:38]
wire pstore_drain_structural = pstore1_valid_likely & pstore2_valid & (s1_valid & s1_write | pstore1_rmw); // @[package.scala:81:59]
wire _dataArb_io_in_0_valid_T_4 = s2_valid_hit_pre_data_ecc_and_waw & s2_write; // @[package.scala:81:59]
wire _dataArb_io_in_0_valid_T_9 = ~(io_cpu_req_valid & ~_pstore_drain_opportunistic_res_T_2) | (|_GEN_19) | pstore_drain_on_miss_REG; // @[package.scala:81:59]
assign pstore_drain = pstore_drain_structural | ((_dataArb_io_in_0_valid_T_4 | pstore1_held) & ~pstore1_rmw | pstore2_valid) & _dataArb_io_in_0_valid_T_9; // @[DCache.scala:498:44, :501:30, :503:46, :504:29, :506:{72,96}, :509:{54,71}, :517:44, :518:{41,44,58,76,107}]
reg [39:0] pstore2_addr; // @[DCache.scala:524:31]
reg [7:0] pstore2_way; // @[DCache.scala:525:30]
reg [7:0] pstore2_storegen_data_r; // @[DCache.scala:528:22]
reg [7:0] pstore2_storegen_data_r_1; // @[DCache.scala:528:22]
reg [7:0] pstore2_storegen_data_r_2; // @[DCache.scala:528:22]
reg [7:0] pstore2_storegen_data_r_3; // @[DCache.scala:528:22]
reg [7:0] pstore2_storegen_data_r_4; // @[DCache.scala:528:22]
reg [7:0] pstore2_storegen_data_r_5; // @[DCache.scala:528:22]
reg [7:0] pstore2_storegen_data_r_6; // @[DCache.scala:528:22]
reg [7:0] pstore2_storegen_data_r_7; // @[DCache.scala:528:22]
reg [7:0] pstore2_storegen_mask; // @[DCache.scala:531:19]
assign dataArb_io_in_0_valid = pstore_drain_structural | ((_dataArb_io_in_0_valid_T_4 | pstore1_held) & ~pstore1_rmw | pstore2_valid) & _dataArb_io_in_0_valid_T_9; // @[DCache.scala:498:44, :501:30, :503:46, :504:29, :506:{72,96}, :509:{54,71}, :517:44, :518:{41,44,58,76,107}]
assign _dataArb_io_in_0_bits_wordMask_wordMask_T = pstore2_valid ? pstore2_addr[11:0] : pstore1_addr[11:0]; // @[DCache.scala:493:31, :501:30, :524:31, :549:36]
assign dataArb_io_in_0_bits_way_en = pstore2_valid ? pstore2_way : pstore1_way; // @[DCache.scala:495:30, :501:30, :525:30, :550:38]
assign dataArb_io_in_0_bits_wdata = pstore2_valid ? {pstore2_storegen_data_r_7, pstore2_storegen_data_r_6, pstore2_storegen_data_r_5, pstore2_storegen_data_r_4, pstore2_storegen_data_r_3, pstore2_storegen_data_r_2, pstore2_storegen_data_r_1, pstore2_storegen_data_r} : pstore1_data; // @[package.scala:45:27]
assign dataArb_io_in_0_bits_eccMask = pstore2_valid ? pstore2_storegen_mask : pstore1_mask; // @[DCache.scala:496:31, :501:30, :531:19, :557:47]
wire _GEN_23 = s1_valid & s1_read & (pstore1_valid_likely & pstore1_addr[11:3] == s1_vaddr[11:3] & (s1_write ? (|(pstore1_mask & s1_mask_xwr)) : (|(pstore1_mask & s1_mask_xwr))) | pstore2_valid & pstore2_addr[11:3] == s1_vaddr[11:3] & (s1_write ? (|(pstore2_storegen_mask & s1_mask_xwr)) : (|(pstore2_storegen_mask & s1_mask_xwr)))); // @[package.scala:81:59]
wire get_a_mask_sub_sub_size = s2_req_size == 2'h2; // @[Misc.scala:209:26]
wire get_a_mask_sub_sub_0_1 = (&s2_req_size) | get_a_mask_sub_sub_size & ~(s2_req_addr[2]); // @[Misc.scala:206:21, :209:26, :210:26, :211:20, :215:{29,38}]
wire get_a_mask_sub_sub_1_1 = (&s2_req_size) | get_a_mask_sub_sub_size & s2_req_addr[2]; // @[Misc.scala:206:21, :209:26, :210:26, :215:{29,38}]
wire get_a_mask_sub_size = s2_req_size == 2'h1; // @[Misc.scala:209:26]
wire get_a_mask_sub_0_2 = ~(s2_req_addr[2]) & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27]
wire get_a_mask_sub_0_1 = get_a_mask_sub_sub_0_1 | get_a_mask_sub_size & get_a_mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire get_a_mask_sub_1_2 = ~(s2_req_addr[2]) & s2_req_addr[1]; // @[Misc.scala:210:26, :211:20, :214:27]
wire get_a_mask_sub_1_1 = get_a_mask_sub_sub_0_1 | get_a_mask_sub_size & get_a_mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire get_a_mask_sub_2_2 = s2_req_addr[2] & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27]
wire get_a_mask_sub_2_1 = get_a_mask_sub_sub_1_1 | get_a_mask_sub_size & get_a_mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire get_a_mask_sub_3_2 = s2_req_addr[2] & s2_req_addr[1]; // @[Misc.scala:210:26, :214:27]
wire get_a_mask_sub_3_1 = get_a_mask_sub_sub_1_1 | get_a_mask_sub_size & get_a_mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire put_a_mask_sub_sub_size = s2_req_size == 2'h2; // @[Misc.scala:209:26]
wire put_a_mask_sub_sub_0_1 = (&s2_req_size) | put_a_mask_sub_sub_size & ~(s2_req_addr[2]); // @[Misc.scala:206:21, :209:26, :210:26, :211:20, :215:{29,38}]
wire put_a_mask_sub_sub_1_1 = (&s2_req_size) | put_a_mask_sub_sub_size & s2_req_addr[2]; // @[Misc.scala:206:21, :209:26, :210:26, :215:{29,38}]
wire put_a_mask_sub_size = s2_req_size == 2'h1; // @[Misc.scala:209:26]
wire put_a_mask_sub_0_2 = ~(s2_req_addr[2]) & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27]
wire put_a_mask_sub_0_1 = put_a_mask_sub_sub_0_1 | put_a_mask_sub_size & put_a_mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire put_a_mask_sub_1_2 = ~(s2_req_addr[2]) & s2_req_addr[1]; // @[Misc.scala:210:26, :211:20, :214:27]
wire put_a_mask_sub_1_1 = put_a_mask_sub_sub_0_1 | put_a_mask_sub_size & put_a_mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire put_a_mask_sub_2_2 = s2_req_addr[2] & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27]
wire put_a_mask_sub_2_1 = put_a_mask_sub_sub_1_1 | put_a_mask_sub_size & put_a_mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire put_a_mask_sub_3_2 = s2_req_addr[2] & s2_req_addr[1]; // @[Misc.scala:210:26, :214:27]
wire put_a_mask_sub_3_1 = put_a_mask_sub_sub_1_1 | put_a_mask_sub_size & put_a_mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_sub_size = s2_req_size == 2'h2; // @[Misc.scala:209:26]
wire atomics_a_mask_sub_sub_0_1 = (&s2_req_size) | atomics_a_mask_sub_sub_size & ~(s2_req_addr[2]); // @[Misc.scala:206:21, :209:26, :210:26, :211:20, :215:{29,38}]
wire atomics_a_mask_sub_sub_1_1 = (&s2_req_size) | atomics_a_mask_sub_sub_size & s2_req_addr[2]; // @[Misc.scala:206:21, :209:26, :210:26, :215:{29,38}]
wire atomics_a_mask_sub_size = s2_req_size == 2'h1; // @[Misc.scala:209:26]
wire atomics_a_mask_sub_0_2 = ~(s2_req_addr[2]) & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_0_1 = atomics_a_mask_sub_sub_0_1 | atomics_a_mask_sub_size & atomics_a_mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_1_2 = ~(s2_req_addr[2]) & s2_req_addr[1]; // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_1_1 = atomics_a_mask_sub_sub_0_1 | atomics_a_mask_sub_size & atomics_a_mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_2_2 = s2_req_addr[2] & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_2_1 = atomics_a_mask_sub_sub_1_1 | atomics_a_mask_sub_size & atomics_a_mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_3_2 = s2_req_addr[2] & s2_req_addr[1]; // @[Misc.scala:210:26, :214:27]
wire atomics_a_mask_sub_3_1 = atomics_a_mask_sub_sub_1_1 | atomics_a_mask_sub_size & atomics_a_mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_sub_size_1 = s2_req_size == 2'h2; // @[Misc.scala:209:26]
wire atomics_a_mask_sub_sub_0_1_1 = (&s2_req_size) | atomics_a_mask_sub_sub_size_1 & ~(s2_req_addr[2]); // @[Misc.scala:206:21, :209:26, :210:26, :211:20, :215:{29,38}]
wire atomics_a_mask_sub_sub_1_1_1 = (&s2_req_size) | atomics_a_mask_sub_sub_size_1 & s2_req_addr[2]; // @[Misc.scala:206:21, :209:26, :210:26, :215:{29,38}]
wire atomics_a_mask_sub_size_1 = s2_req_size == 2'h1; // @[Misc.scala:209:26]
wire atomics_a_mask_sub_0_2_1 = ~(s2_req_addr[2]) & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_0_1_1 = atomics_a_mask_sub_sub_0_1_1 | atomics_a_mask_sub_size_1 & atomics_a_mask_sub_0_2_1; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_1_2_1 = ~(s2_req_addr[2]) & s2_req_addr[1]; // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_1_1_1 = atomics_a_mask_sub_sub_0_1_1 | atomics_a_mask_sub_size_1 & atomics_a_mask_sub_1_2_1; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_2_2_1 = s2_req_addr[2] & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_2_1_1 = atomics_a_mask_sub_sub_1_1_1 | atomics_a_mask_sub_size_1 & atomics_a_mask_sub_2_2_1; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_3_2_1 = s2_req_addr[2] & s2_req_addr[1]; // @[Misc.scala:210:26, :214:27]
wire atomics_a_mask_sub_3_1_1 = atomics_a_mask_sub_sub_1_1_1 | atomics_a_mask_sub_size_1 & atomics_a_mask_sub_3_2_1; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_sub_size_2 = s2_req_size == 2'h2; // @[Misc.scala:209:26]
wire atomics_a_mask_sub_sub_0_1_2 = (&s2_req_size) | atomics_a_mask_sub_sub_size_2 & ~(s2_req_addr[2]); // @[Misc.scala:206:21, :209:26, :210:26, :211:20, :215:{29,38}]
wire atomics_a_mask_sub_sub_1_1_2 = (&s2_req_size) | atomics_a_mask_sub_sub_size_2 & s2_req_addr[2]; // @[Misc.scala:206:21, :209:26, :210:26, :215:{29,38}]
wire atomics_a_mask_sub_size_2 = s2_req_size == 2'h1; // @[Misc.scala:209:26]
wire atomics_a_mask_sub_0_2_2 = ~(s2_req_addr[2]) & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_0_1_2 = atomics_a_mask_sub_sub_0_1_2 | atomics_a_mask_sub_size_2 & atomics_a_mask_sub_0_2_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_1_2_2 = ~(s2_req_addr[2]) & s2_req_addr[1]; // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_1_1_2 = atomics_a_mask_sub_sub_0_1_2 | atomics_a_mask_sub_size_2 & atomics_a_mask_sub_1_2_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_2_2_2 = s2_req_addr[2] & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_2_1_2 = atomics_a_mask_sub_sub_1_1_2 | atomics_a_mask_sub_size_2 & atomics_a_mask_sub_2_2_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_3_2_2 = s2_req_addr[2] & s2_req_addr[1]; // @[Misc.scala:210:26, :214:27]
wire atomics_a_mask_sub_3_1_2 = atomics_a_mask_sub_sub_1_1_2 | atomics_a_mask_sub_size_2 & atomics_a_mask_sub_3_2_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_sub_size_3 = s2_req_size == 2'h2; // @[Misc.scala:209:26]
wire atomics_a_mask_sub_sub_0_1_3 = (&s2_req_size) | atomics_a_mask_sub_sub_size_3 & ~(s2_req_addr[2]); // @[Misc.scala:206:21, :209:26, :210:26, :211:20, :215:{29,38}]
wire atomics_a_mask_sub_sub_1_1_3 = (&s2_req_size) | atomics_a_mask_sub_sub_size_3 & s2_req_addr[2]; // @[Misc.scala:206:21, :209:26, :210:26, :215:{29,38}]
wire atomics_a_mask_sub_size_3 = s2_req_size == 2'h1; // @[Misc.scala:209:26]
wire atomics_a_mask_sub_0_2_3 = ~(s2_req_addr[2]) & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_0_1_3 = atomics_a_mask_sub_sub_0_1_3 | atomics_a_mask_sub_size_3 & atomics_a_mask_sub_0_2_3; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_1_2_3 = ~(s2_req_addr[2]) & s2_req_addr[1]; // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_1_1_3 = atomics_a_mask_sub_sub_0_1_3 | atomics_a_mask_sub_size_3 & atomics_a_mask_sub_1_2_3; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_2_2_3 = s2_req_addr[2] & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_2_1_3 = atomics_a_mask_sub_sub_1_1_3 | atomics_a_mask_sub_size_3 & atomics_a_mask_sub_2_2_3; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_3_2_3 = s2_req_addr[2] & s2_req_addr[1]; // @[Misc.scala:210:26, :214:27]
wire atomics_a_mask_sub_3_1_3 = atomics_a_mask_sub_sub_1_1_3 | atomics_a_mask_sub_size_3 & atomics_a_mask_sub_3_2_3; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_sub_size_4 = s2_req_size == 2'h2; // @[Misc.scala:209:26]
wire atomics_a_mask_sub_sub_0_1_4 = (&s2_req_size) | atomics_a_mask_sub_sub_size_4 & ~(s2_req_addr[2]); // @[Misc.scala:206:21, :209:26, :210:26, :211:20, :215:{29,38}]
wire atomics_a_mask_sub_sub_1_1_4 = (&s2_req_size) | atomics_a_mask_sub_sub_size_4 & s2_req_addr[2]; // @[Misc.scala:206:21, :209:26, :210:26, :215:{29,38}]
wire atomics_a_mask_sub_size_4 = s2_req_size == 2'h1; // @[Misc.scala:209:26]
wire atomics_a_mask_sub_0_2_4 = ~(s2_req_addr[2]) & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_0_1_4 = atomics_a_mask_sub_sub_0_1_4 | atomics_a_mask_sub_size_4 & atomics_a_mask_sub_0_2_4; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_1_2_4 = ~(s2_req_addr[2]) & s2_req_addr[1]; // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_1_1_4 = atomics_a_mask_sub_sub_0_1_4 | atomics_a_mask_sub_size_4 & atomics_a_mask_sub_1_2_4; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_2_2_4 = s2_req_addr[2] & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_2_1_4 = atomics_a_mask_sub_sub_1_1_4 | atomics_a_mask_sub_size_4 & atomics_a_mask_sub_2_2_4; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_3_2_4 = s2_req_addr[2] & s2_req_addr[1]; // @[Misc.scala:210:26, :214:27]
wire atomics_a_mask_sub_3_1_4 = atomics_a_mask_sub_sub_1_1_4 | atomics_a_mask_sub_size_4 & atomics_a_mask_sub_3_2_4; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_sub_size_5 = s2_req_size == 2'h2; // @[Misc.scala:209:26]
wire atomics_a_mask_sub_sub_0_1_5 = (&s2_req_size) | atomics_a_mask_sub_sub_size_5 & ~(s2_req_addr[2]); // @[Misc.scala:206:21, :209:26, :210:26, :211:20, :215:{29,38}]
wire atomics_a_mask_sub_sub_1_1_5 = (&s2_req_size) | atomics_a_mask_sub_sub_size_5 & s2_req_addr[2]; // @[Misc.scala:206:21, :209:26, :210:26, :215:{29,38}]
wire atomics_a_mask_sub_size_5 = s2_req_size == 2'h1; // @[Misc.scala:209:26]
wire atomics_a_mask_sub_0_2_5 = ~(s2_req_addr[2]) & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_0_1_5 = atomics_a_mask_sub_sub_0_1_5 | atomics_a_mask_sub_size_5 & atomics_a_mask_sub_0_2_5; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_1_2_5 = ~(s2_req_addr[2]) & s2_req_addr[1]; // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_1_1_5 = atomics_a_mask_sub_sub_0_1_5 | atomics_a_mask_sub_size_5 & atomics_a_mask_sub_1_2_5; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_2_2_5 = s2_req_addr[2] & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_2_1_5 = atomics_a_mask_sub_sub_1_1_5 | atomics_a_mask_sub_size_5 & atomics_a_mask_sub_2_2_5; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_3_2_5 = s2_req_addr[2] & s2_req_addr[1]; // @[Misc.scala:210:26, :214:27]
wire atomics_a_mask_sub_3_1_5 = atomics_a_mask_sub_sub_1_1_5 | atomics_a_mask_sub_size_5 & atomics_a_mask_sub_3_2_5; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_sub_size_6 = s2_req_size == 2'h2; // @[Misc.scala:209:26]
wire atomics_a_mask_sub_sub_0_1_6 = (&s2_req_size) | atomics_a_mask_sub_sub_size_6 & ~(s2_req_addr[2]); // @[Misc.scala:206:21, :209:26, :210:26, :211:20, :215:{29,38}]
wire atomics_a_mask_sub_sub_1_1_6 = (&s2_req_size) | atomics_a_mask_sub_sub_size_6 & s2_req_addr[2]; // @[Misc.scala:206:21, :209:26, :210:26, :215:{29,38}]
wire atomics_a_mask_sub_size_6 = s2_req_size == 2'h1; // @[Misc.scala:209:26]
wire atomics_a_mask_sub_0_2_6 = ~(s2_req_addr[2]) & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_0_1_6 = atomics_a_mask_sub_sub_0_1_6 | atomics_a_mask_sub_size_6 & atomics_a_mask_sub_0_2_6; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_1_2_6 = ~(s2_req_addr[2]) & s2_req_addr[1]; // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_1_1_6 = atomics_a_mask_sub_sub_0_1_6 | atomics_a_mask_sub_size_6 & atomics_a_mask_sub_1_2_6; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_2_2_6 = s2_req_addr[2] & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_2_1_6 = atomics_a_mask_sub_sub_1_1_6 | atomics_a_mask_sub_size_6 & atomics_a_mask_sub_2_2_6; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_3_2_6 = s2_req_addr[2] & s2_req_addr[1]; // @[Misc.scala:210:26, :214:27]
wire atomics_a_mask_sub_3_1_6 = atomics_a_mask_sub_sub_1_1_6 | atomics_a_mask_sub_size_6 & atomics_a_mask_sub_3_2_6; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_sub_size_7 = s2_req_size == 2'h2; // @[Misc.scala:209:26]
wire atomics_a_mask_sub_sub_0_1_7 = (&s2_req_size) | atomics_a_mask_sub_sub_size_7 & ~(s2_req_addr[2]); // @[Misc.scala:206:21, :209:26, :210:26, :211:20, :215:{29,38}]
wire atomics_a_mask_sub_sub_1_1_7 = (&s2_req_size) | atomics_a_mask_sub_sub_size_7 & s2_req_addr[2]; // @[Misc.scala:206:21, :209:26, :210:26, :215:{29,38}]
wire atomics_a_mask_sub_size_7 = s2_req_size == 2'h1; // @[Misc.scala:209:26]
wire atomics_a_mask_sub_0_2_7 = ~(s2_req_addr[2]) & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_0_1_7 = atomics_a_mask_sub_sub_0_1_7 | atomics_a_mask_sub_size_7 & atomics_a_mask_sub_0_2_7; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_1_2_7 = ~(s2_req_addr[2]) & s2_req_addr[1]; // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_1_1_7 = atomics_a_mask_sub_sub_0_1_7 | atomics_a_mask_sub_size_7 & atomics_a_mask_sub_1_2_7; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_2_2_7 = s2_req_addr[2] & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_2_1_7 = atomics_a_mask_sub_sub_1_1_7 | atomics_a_mask_sub_size_7 & atomics_a_mask_sub_2_2_7; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_3_2_7 = s2_req_addr[2] & s2_req_addr[1]; // @[Misc.scala:210:26, :214:27]
wire atomics_a_mask_sub_3_1_7 = atomics_a_mask_sub_sub_1_1_7 | atomics_a_mask_sub_size_7 & atomics_a_mask_sub_3_2_7; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_sub_size_8 = s2_req_size == 2'h2; // @[Misc.scala:209:26]
wire atomics_a_mask_sub_sub_0_1_8 = (&s2_req_size) | atomics_a_mask_sub_sub_size_8 & ~(s2_req_addr[2]); // @[Misc.scala:206:21, :209:26, :210:26, :211:20, :215:{29,38}]
wire atomics_a_mask_sub_sub_1_1_8 = (&s2_req_size) | atomics_a_mask_sub_sub_size_8 & s2_req_addr[2]; // @[Misc.scala:206:21, :209:26, :210:26, :215:{29,38}]
wire atomics_a_mask_sub_size_8 = s2_req_size == 2'h1; // @[Misc.scala:209:26]
wire atomics_a_mask_sub_0_2_8 = ~(s2_req_addr[2]) & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_0_1_8 = atomics_a_mask_sub_sub_0_1_8 | atomics_a_mask_sub_size_8 & atomics_a_mask_sub_0_2_8; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_1_2_8 = ~(s2_req_addr[2]) & s2_req_addr[1]; // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_1_1_8 = atomics_a_mask_sub_sub_0_1_8 | atomics_a_mask_sub_size_8 & atomics_a_mask_sub_1_2_8; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_2_2_8 = s2_req_addr[2] & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27]
wire atomics_a_mask_sub_2_1_8 = atomics_a_mask_sub_sub_1_1_8 | atomics_a_mask_sub_size_8 & atomics_a_mask_sub_2_2_8; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire atomics_a_mask_sub_3_2_8 = s2_req_addr[2] & s2_req_addr[1]; // @[Misc.scala:210:26, :214:27]
wire atomics_a_mask_sub_3_1_8 = atomics_a_mask_sub_sub_1_1_8 | atomics_a_mask_sub_size_8 & atomics_a_mask_sub_3_2_8; // @[Misc.scala:209:26, :214:27, :215:{29,38}]
wire [7:0] atomics_mask =
_io_cpu_store_pending_T_16
? {atomics_a_mask_sub_3_1_8 | atomics_a_mask_sub_3_2_8 & s2_req_addr[0], atomics_a_mask_sub_3_1_8 | atomics_a_mask_sub_3_2_8 & ~(s2_req_addr[0]), atomics_a_mask_sub_2_1_8 | atomics_a_mask_sub_2_2_8 & s2_req_addr[0], atomics_a_mask_sub_2_1_8 | atomics_a_mask_sub_2_2_8 & ~(s2_req_addr[0]), atomics_a_mask_sub_1_1_8 | atomics_a_mask_sub_1_2_8 & s2_req_addr[0], atomics_a_mask_sub_1_1_8 | atomics_a_mask_sub_1_2_8 & ~(s2_req_addr[0]), atomics_a_mask_sub_0_1_8 | atomics_a_mask_sub_0_2_8 & s2_req_addr[0], atomics_a_mask_sub_0_1_8 | atomics_a_mask_sub_0_2_8 & ~(s2_req_addr[0])}
: _io_cpu_store_pending_T_15
? {atomics_a_mask_sub_3_1_7 | atomics_a_mask_sub_3_2_7 & s2_req_addr[0], atomics_a_mask_sub_3_1_7 | atomics_a_mask_sub_3_2_7 & ~(s2_req_addr[0]), atomics_a_mask_sub_2_1_7 | atomics_a_mask_sub_2_2_7 & s2_req_addr[0], atomics_a_mask_sub_2_1_7 | atomics_a_mask_sub_2_2_7 & ~(s2_req_addr[0]), atomics_a_mask_sub_1_1_7 | atomics_a_mask_sub_1_2_7 & s2_req_addr[0], atomics_a_mask_sub_1_1_7 | atomics_a_mask_sub_1_2_7 & ~(s2_req_addr[0]), atomics_a_mask_sub_0_1_7 | atomics_a_mask_sub_0_2_7 & s2_req_addr[0], atomics_a_mask_sub_0_1_7 | atomics_a_mask_sub_0_2_7 & ~(s2_req_addr[0])}
: _io_cpu_store_pending_T_14
? {atomics_a_mask_sub_3_1_6 | atomics_a_mask_sub_3_2_6 & s2_req_addr[0], atomics_a_mask_sub_3_1_6 | atomics_a_mask_sub_3_2_6 & ~(s2_req_addr[0]), atomics_a_mask_sub_2_1_6 | atomics_a_mask_sub_2_2_6 & s2_req_addr[0], atomics_a_mask_sub_2_1_6 | atomics_a_mask_sub_2_2_6 & ~(s2_req_addr[0]), atomics_a_mask_sub_1_1_6 | atomics_a_mask_sub_1_2_6 & s2_req_addr[0], atomics_a_mask_sub_1_1_6 | atomics_a_mask_sub_1_2_6 & ~(s2_req_addr[0]), atomics_a_mask_sub_0_1_6 | atomics_a_mask_sub_0_2_6 & s2_req_addr[0], atomics_a_mask_sub_0_1_6 | atomics_a_mask_sub_0_2_6 & ~(s2_req_addr[0])}
: _io_cpu_store_pending_T_13
? {atomics_a_mask_sub_3_1_5 | atomics_a_mask_sub_3_2_5 & s2_req_addr[0], atomics_a_mask_sub_3_1_5 | atomics_a_mask_sub_3_2_5 & ~(s2_req_addr[0]), atomics_a_mask_sub_2_1_5 | atomics_a_mask_sub_2_2_5 & s2_req_addr[0], atomics_a_mask_sub_2_1_5 | atomics_a_mask_sub_2_2_5 & ~(s2_req_addr[0]), atomics_a_mask_sub_1_1_5 | atomics_a_mask_sub_1_2_5 & s2_req_addr[0], atomics_a_mask_sub_1_1_5 | atomics_a_mask_sub_1_2_5 & ~(s2_req_addr[0]), atomics_a_mask_sub_0_1_5 | atomics_a_mask_sub_0_2_5 & s2_req_addr[0], atomics_a_mask_sub_0_1_5 | atomics_a_mask_sub_0_2_5 & ~(s2_req_addr[0])}
: _io_cpu_store_pending_T_12
? {atomics_a_mask_sub_3_1_4 | atomics_a_mask_sub_3_2_4 & s2_req_addr[0], atomics_a_mask_sub_3_1_4 | atomics_a_mask_sub_3_2_4 & ~(s2_req_addr[0]), atomics_a_mask_sub_2_1_4 | atomics_a_mask_sub_2_2_4 & s2_req_addr[0], atomics_a_mask_sub_2_1_4 | atomics_a_mask_sub_2_2_4 & ~(s2_req_addr[0]), atomics_a_mask_sub_1_1_4 | atomics_a_mask_sub_1_2_4 & s2_req_addr[0], atomics_a_mask_sub_1_1_4 | atomics_a_mask_sub_1_2_4 & ~(s2_req_addr[0]), atomics_a_mask_sub_0_1_4 | atomics_a_mask_sub_0_2_4 & s2_req_addr[0], atomics_a_mask_sub_0_1_4 | atomics_a_mask_sub_0_2_4 & ~(s2_req_addr[0])}
: _io_cpu_store_pending_T_8
? {atomics_a_mask_sub_3_1_3 | atomics_a_mask_sub_3_2_3 & s2_req_addr[0], atomics_a_mask_sub_3_1_3 | atomics_a_mask_sub_3_2_3 & ~(s2_req_addr[0]), atomics_a_mask_sub_2_1_3 | atomics_a_mask_sub_2_2_3 & s2_req_addr[0], atomics_a_mask_sub_2_1_3 | atomics_a_mask_sub_2_2_3 & ~(s2_req_addr[0]), atomics_a_mask_sub_1_1_3 | atomics_a_mask_sub_1_2_3 & s2_req_addr[0], atomics_a_mask_sub_1_1_3 | atomics_a_mask_sub_1_2_3 & ~(s2_req_addr[0]), atomics_a_mask_sub_0_1_3 | atomics_a_mask_sub_0_2_3 & s2_req_addr[0], atomics_a_mask_sub_0_1_3 | atomics_a_mask_sub_0_2_3 & ~(s2_req_addr[0])}
: _io_cpu_store_pending_T_7 ? {atomics_a_mask_sub_3_1_2 | atomics_a_mask_sub_3_2_2 & s2_req_addr[0], atomics_a_mask_sub_3_1_2 | atomics_a_mask_sub_3_2_2 & ~(s2_req_addr[0]), atomics_a_mask_sub_2_1_2 | atomics_a_mask_sub_2_2_2 & s2_req_addr[0], atomics_a_mask_sub_2_1_2 | atomics_a_mask_sub_2_2_2 & ~(s2_req_addr[0]), atomics_a_mask_sub_1_1_2 | atomics_a_mask_sub_1_2_2 & s2_req_addr[0], atomics_a_mask_sub_1_1_2 | atomics_a_mask_sub_1_2_2 & ~(s2_req_addr[0]), atomics_a_mask_sub_0_1_2 | atomics_a_mask_sub_0_2_2 & s2_req_addr[0], atomics_a_mask_sub_0_1_2 | atomics_a_mask_sub_0_2_2 & ~(s2_req_addr[0])} : _io_cpu_store_pending_T_6 ? {atomics_a_mask_sub_3_1_1 | atomics_a_mask_sub_3_2_1 & s2_req_addr[0], atomics_a_mask_sub_3_1_1 | atomics_a_mask_sub_3_2_1 & ~(s2_req_addr[0]), atomics_a_mask_sub_2_1_1 | atomics_a_mask_sub_2_2_1 & s2_req_addr[0], atomics_a_mask_sub_2_1_1 | atomics_a_mask_sub_2_2_1 & ~(s2_req_addr[0]), atomics_a_mask_sub_1_1_1 | atomics_a_mask_sub_1_2_1 & s2_req_addr[0], atomics_a_mask_sub_1_1_1 | atomics_a_mask_sub_1_2_1 & ~(s2_req_addr[0]), atomics_a_mask_sub_0_1_1 | atomics_a_mask_sub_0_2_1 & s2_req_addr[0], atomics_a_mask_sub_0_1_1 | atomics_a_mask_sub_0_2_1 & ~(s2_req_addr[0])} : _io_cpu_store_pending_T_5 ? {atomics_a_mask_sub_3_1 | atomics_a_mask_sub_3_2 & s2_req_addr[0], atomics_a_mask_sub_3_1 | atomics_a_mask_sub_3_2 & ~(s2_req_addr[0]), atomics_a_mask_sub_2_1 | atomics_a_mask_sub_2_2 & s2_req_addr[0], atomics_a_mask_sub_2_1 | atomics_a_mask_sub_2_2 & ~(s2_req_addr[0]), atomics_a_mask_sub_1_1 | atomics_a_mask_sub_1_2 & s2_req_addr[0], atomics_a_mask_sub_1_1 | atomics_a_mask_sub_1_2 & ~(s2_req_addr[0]), atomics_a_mask_sub_0_1 | atomics_a_mask_sub_0_2 & s2_req_addr[0], atomics_a_mask_sub_0_1 | atomics_a_mask_sub_0_2 & ~(s2_req_addr[0])} : 8'h0; // @[package.scala:16:47]
wire tl_out_a_valid = s2_valid_uncached_pending | s2_valid_cached_miss & ~(release_ack_wait & (s2_req_addr[20:6] ^ release_ack_addr[20:6]) == 15'h0) & ~s2_victim_dirty; // @[Misc.scala:38:9]
wire _GEN_24 = ~s2_write | _io_cpu_store_pending_T_1 | ~s2_read | _io_cpu_store_pending_T_16 | _io_cpu_store_pending_T_15 | _io_cpu_store_pending_T_14 | _io_cpu_store_pending_T_13 | _io_cpu_store_pending_T_12 | _io_cpu_store_pending_T_8 | _io_cpu_store_pending_T_7 | _io_cpu_store_pending_T_6 | _io_cpu_store_pending_T_5; // @[package.scala:16:47, :81:59]
wire _io_errors_bus_valid_T = nodeOut_d_ready & auto_out_d_valid; // @[Decoupled.scala:51:35]
wire [26:0] _r_beats1_decode_T = 27'hFFF << auto_out_d_bits_size; // @[package.scala:243:71]
wire [8:0] r_beats1 = auto_out_d_bits_opcode[0] ? ~(_r_beats1_decode_T[11:3]) : 9'h0; // @[package.scala:243:{46,71,76}]
reg [8:0] r_counter; // @[Edges.scala:229:27]
wire [8:0] _r_counter1_T = r_counter - 9'h1; // @[Edges.scala:229:27, :230:28]
wire d_last = r_counter == 9'h1 | r_beats1 == 9'h0; // @[Edges.scala:221:14, :229:27, :232:{25,33,43}]
wire [8:0] r_4 = r_beats1 & ~_r_counter1_T; // @[Edges.scala:221:14, :230:28, :234:{25,27}]
wire grantIsUncachedData = auto_out_d_bits_opcode == 3'h1; // @[package.scala:16:47]
wire grantIsUncached = grantIsUncachedData | auto_out_d_bits_opcode == 3'h0 | auto_out_d_bits_opcode == 3'h2; // @[package.scala:16:47, :81:59]
wire grantIsRefill = auto_out_d_bits_opcode == 3'h5; // @[package.scala:16:47]
wire grantIsCached = auto_out_d_bits_opcode == 3'h4 | grantIsRefill; // @[package.scala:16:47, :81:59]
wire grantIsVoluntary = auto_out_d_bits_opcode == 3'h6; // @[DCache.scala:665:32]
reg grantInProgress; // @[DCache.scala:667:32]
reg [2:0] blockProbeAfterGrantCount; // @[DCache.scala:668:42]
wire _metaArb_io_in_4_valid_T = release_state == 4'h6; // @[package.scala:16:47]
wire _nodeOut_c_valid_T_1 = release_state == 4'h9; // @[package.scala:16:47]
wire _canAcceptCachedGrant_T_4 = _canAcceptCachedGrant_T | _metaArb_io_in_4_valid_T | _nodeOut_c_valid_T_1; // @[package.scala:16:47, :81:59]
wire _GEN_25 = _io_errors_bus_valid_T & grantIsCached; // @[Decoupled.scala:51:35]
wire _GEN_26 = auto_out_d_bits_source & d_last; // @[Edges.scala:232:33]
wire _GEN_27 = grantIsRefill & dataArb_io_in_0_valid; // @[package.scala:16:47]
wire nodeOut_e_valid = ~_GEN_27 & auto_out_d_valid & ~(|r_counter) & grantIsCached & ~_canAcceptCachedGrant_T_4; // @[package.scala:81:59]
assign dataArb_io_in_1_bits_addr = {s2_req_addr[11:6] | r_4[8:3], r_4[2:0], 3'h0}; // @[Edges.scala:234:25]
assign metaArb_io_in_3_valid = grantIsCached & d_last & _io_errors_bus_valid_T & ~auto_out_d_bits_denied; // @[Decoupled.scala:51:35]
wire [3:0] _metaArb_io_in_3_bits_data_T_1 = {_io_cpu_store_pending_T | _io_cpu_store_pending_T_1 | s2_sc | _io_cpu_store_pending_T_5 | _io_cpu_store_pending_T_6 | _io_cpu_store_pending_T_7 | _io_cpu_store_pending_T_8 | _io_cpu_store_pending_T_12 | _io_cpu_store_pending_T_13 | _io_cpu_store_pending_T_14 | _io_cpu_store_pending_T_15 | _io_cpu_store_pending_T_16, _io_cpu_store_pending_T | _io_cpu_store_pending_T_1 | s2_sc | _io_cpu_store_pending_T_5 | _io_cpu_store_pending_T_6 | _io_cpu_store_pending_T_7 | _io_cpu_store_pending_T_8 | _io_cpu_store_pending_T_12 | _io_cpu_store_pending_T_13 | _io_cpu_store_pending_T_14 | _io_cpu_store_pending_T_15 | _io_cpu_store_pending_T_16 | _metaArb_io_in_3_bits_data_c_cat_T_46 | s2_lr, auto_out_d_bits_param}; // @[package.scala:16:47, :81:59]
assign metaArb_io_in_3_bits_data = {_metaArb_io_in_3_bits_data_T_1 == 4'hC ? 2'h3 : _metaArb_io_in_3_bits_data_T_1 == 4'h4 | _metaArb_io_in_3_bits_data_T_1 == 4'h0 ? 2'h2 : {1'h0, _metaArb_io_in_3_bits_data_T_1 == 4'h1}, s2_req_addr[31:12]}; // @[Metadata.scala:84:{18,38}]
reg blockUncachedGrant; // @[DCache.scala:750:33]
wire _GEN_28 = grantIsUncachedData & (blockUncachedGrant | s1_valid); // @[package.scala:16:47]
assign nodeOut_d_ready = ~(_GEN_28 | _GEN_27) & (~grantIsCached | ((|r_counter) | auto_out_e_ready) & ~_canAcceptCachedGrant_T_4); // @[package.scala:81:59]
wire io_cpu_req_ready_0 = _GEN_28 ? ~(auto_out_d_valid | _GEN_17 | metaArb__grant_T_5 | _GEN_16) & _io_cpu_req_ready_T_4 : ~(_GEN_17 | metaArb__grant_T_5 | _GEN_16) & _io_cpu_req_ready_T_4; // @[Arbiter.scala:45:68]
wire _GEN_29 = _GEN_28 & auto_out_d_valid; // @[DCache.scala:721:26, :752:{31,68}, :755:29, :757:32]
assign dataArb_io_in_1_valid = _GEN_29 | auto_out_d_valid & grantIsRefill & ~_canAcceptCachedGrant_T_4; // @[package.scala:16:47, :81:59]
assign dataArb_io_in_1_bits_write = ~_GEN_28 | ~auto_out_d_valid; // @[DCache.scala:727:33, :752:{31,68}, :755:29, :758:37]
wire block_probe_for_core_progress = (|blockProbeAfterGrantCount) | (|(lrscCount[6:2])); // @[DCache.scala:472:26, :473:29, :668:42, :669:35, :766:71]
wire nodeOut_b_ready = ~metaArb__grant_T_3 & ~(block_probe_for_core_progress | (|_GEN_19) | release_ack_wait & (auto_out_b_bits_address[20:6] ^ release_ack_addr[20:6]) == 15'h0 | grantInProgress | s1_valid | s2_valid); // @[Arbiter.scala:45:{68,78}]
wire _io_cpu_perf_release_T = auto_out_c_ready & nodeOut_c_valid; // @[Decoupled.scala:51:35]
wire [26:0] _io_cpu_perf_release_beats1_decode_T = 27'hFFF << nodeOut_c_bits_size; // @[package.scala:243:71]
wire [8:0] r_beats1_1 = nodeOut_c_bits_opcode[0] ? ~(_io_cpu_perf_release_beats1_decode_T[11:3]) : 9'h0; // @[package.scala:243:{46,71,76}]
reg [8:0] r_counter_1; // @[Edges.scala:229:27]
wire [8:0] _r_counter1_T_1 = r_counter_1 - 9'h1; // @[Edges.scala:229:27, :230:28]
wire c_first = r_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire releaseDone = (r_counter_1 == 9'h1 | r_beats1_1 == 9'h0) & _io_cpu_perf_release_T; // @[Decoupled.scala:51:35]
reg s1_release_data_valid; // @[DCache.scala:801:38]
reg s2_release_data_valid; // @[DCache.scala:802:38]
wire releaseRejected = s2_release_data_valid & ~_io_cpu_perf_release_T; // @[Decoupled.scala:51:35]
wire [9:0] _releaseDataBeat_T_5 = {1'h0, r_beats1_1 & ~_r_counter1_T_1} + {8'h0, releaseRejected ? 2'h0 : {1'h0, s1_release_data_valid} + {1'h0, s2_release_data_valid}}; // @[Edges.scala:221:14, :230:28, :234:{25,27}]
assign s1_nack = s2_probe ? s2_prb_ack_data | (|s2_probe_state_state) | ~releaseDone | _GEN_23 | _GEN_22 | _GEN_18 : _GEN_23 | _GEN_22 | _GEN_18; // @[Misc.scala:38:9]
wire _GEN_30 = release_state == 4'h4; // @[DCache.scala:228:30, :841:25]
assign metaArb_io_in_6_valid = _GEN_30 | auto_out_b_valid & (~block_probe_for_core_progress | (|lrscCount) & lrscCount < 7'h4); // @[DCache.scala:472:26, :473:29, :474:{34,40,43}, :766:71, :769:{26,44,48,79}, :841:{25,44}, :842:30]
assign metaArb_io_in_6_bits_idx = _GEN_30 ? metaArb_io_in_4_bits_idx : auto_out_b_bits_address[11:6]; // @[DCache.scala:772:29, :841:{25,44}, :843:33, :1200:47]
wire _GEN_31 = release_state == 4'h5; // @[DCache.scala:228:30, :850:25]
wire _GEN_32 = release_state == 4'h3; // @[DCache.scala:228:30, :854:25]
assign nodeOut_c_valid = _GEN_32 | _GEN_31 | s2_probe & ~s2_prb_ack_data | s2_release_data_valid & ~(c_first & release_ack_wait); // @[Misc.scala:38:9]
wire _GEN_33 = _canAcceptCachedGrant_T | _metaArb_io_in_4_valid_T | _nodeOut_c_valid_T_1; // @[package.scala:16:47, :81:59]
assign nodeOut_c_bits_opcode = _GEN_33 ? {2'h3, ~_nodeOut_c_valid_T_1} : {2'h2, _inWriteback_T_1}; // @[package.scala:16:47, :81:59]
assign nodeOut_c_bits_size = _GEN_33 ? 4'h6 : probe_bits_size; // @[package.scala:81:59]
assign releaseWay = _GEN_33 ? s2_victim_or_hit_way : s2_probe_way; // @[package.scala:81:59]
assign dataArb_io_in_2_valid = inWriteback & _releaseDataBeat_T_5 < 10'h8; // @[package.scala:81:59]
assign dataArb_io_in_2_bits_addr = {metaArb_io_in_4_bits_idx, _releaseDataBeat_T_5[2:0], 3'h0}; // @[DCache.scala:804:43, :903:{72,90}, :1200:47]
assign metaArb_io_in_4_valid = _metaArb_io_in_4_valid_T | release_state == 4'h7; // @[package.scala:16:47, :81:59]
assign metaArb_io_in_7_bits_data = {_GEN_33 ? 2'h0 : _GEN_21 ? 2'h2 : _r_T_108 ? 2'h1 : _r_T_104 ? 2'h0 : {1'h0, _r_T_100 | _r_T_96 | _r_T_92}, probe_bits_address[31:12]}; // @[package.scala:81:59]
reg io_cpu_s2_xcpt_REG; // @[DCache.scala:933:32]
assign io_cpu_s2_xcpt_pf_ld_0 = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_pf_ld; // @[DCache.scala:342:24, :933:{24,32}]
assign io_cpu_s2_xcpt_pf_st_0 = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_pf_st; // @[DCache.scala:342:24, :933:{24,32}]
assign io_cpu_s2_xcpt_ae_ld_0 = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_ae_ld; // @[DCache.scala:342:24, :933:{24,32}]
assign io_cpu_s2_xcpt_ae_st_0 = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_ae_st; // @[DCache.scala:342:24, :933:{24,32}]
assign io_cpu_s2_xcpt_ma_ld_0 = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_ma_ld; // @[DCache.scala:342:24, :933:{24,32}]
assign io_cpu_s2_xcpt_ma_st_0 = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_ma_st; // @[DCache.scala:342:24, :933:{24,32}]
reg doUncachedResp; // @[DCache.scala:948:31]
wire io_cpu_replay_next_0 = _io_errors_bus_valid_T & grantIsUncachedData; // @[Decoupled.scala:51:35] |
Generate the Verilog code corresponding to the following Chisel files.
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File RegisterRouter.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes}
import freechips.rocketchip.resources.{Device, Resource, ResourceBindings}
import freechips.rocketchip.prci.{NoCrossing}
import freechips.rocketchip.regmapper.{RegField, RegMapper, RegMapperParams, RegMapperInput, RegisterRouter}
import freechips.rocketchip.util.{BundleField, ControlKey, ElaborationArtefacts, GenRegDescsAnno}
import scala.math.min
class TLRegisterRouterExtraBundle(val sourceBits: Int, val sizeBits: Int) extends Bundle {
val source = UInt((sourceBits max 1).W)
val size = UInt((sizeBits max 1).W)
}
case object TLRegisterRouterExtra extends ControlKey[TLRegisterRouterExtraBundle]("tlrr_extra")
case class TLRegisterRouterExtraField(sourceBits: Int, sizeBits: Int) extends BundleField[TLRegisterRouterExtraBundle](TLRegisterRouterExtra, Output(new TLRegisterRouterExtraBundle(sourceBits, sizeBits)), x => {
x.size := 0.U
x.source := 0.U
})
/** TLRegisterNode is a specialized TL SinkNode that encapsulates MMIO registers.
 * It provides functionality for describing and outputting metadata about the registers in several formats.
* It also provides a concrete implementation of a regmap function that will be used
* to wire a map of internal registers associated with this node to the node's interconnect port.
*/
case class TLRegisterNode(
address: Seq[AddressSet],
device: Device,
deviceKey: String = "reg/control",
concurrency: Int = 0,
beatBytes: Int = 4,
undefZero: Boolean = true,
executable: Boolean = false)(
implicit valName: ValName)
extends SinkNode(TLImp)(Seq(TLSlavePortParameters.v1(
Seq(TLSlaveParameters.v1(
address = address,
resources = Seq(Resource(device, deviceKey)),
executable = executable,
supportsGet = TransferSizes(1, beatBytes),
supportsPutPartial = TransferSizes(1, beatBytes),
supportsPutFull = TransferSizes(1, beatBytes),
fifoId = Some(0))), // requests are handled in order
beatBytes = beatBytes,
minLatency = min(concurrency, 1)))) with TLFormatNode // the Queue adds at most one cycle
{
val size = 1 << log2Ceil(1 + address.map(_.max).max - address.map(_.base).min)
require (size >= beatBytes)
address.foreach { case a =>
require (a.widen(size-1).base == address.head.widen(size-1).base,
s"TLRegisterNode addresses (${address}) must be aligned to its size ${size}")
}
// Calling this method causes the matching TL2 bundle to be
// configured to route all requests to the listed RegFields.
def regmap(mapping: RegField.Map*) = {
val (bundleIn, edge) = this.in(0)
val a = bundleIn.a
val d = bundleIn.d
val fields = TLRegisterRouterExtraField(edge.bundle.sourceBits, edge.bundle.sizeBits) +: a.bits.params.echoFields
val params = RegMapperParams(log2Up(size/beatBytes), beatBytes, fields)
val in = Wire(Decoupled(new RegMapperInput(params)))
in.bits.read := a.bits.opcode === TLMessages.Get
in.bits.index := edge.addr_hi(a.bits)
in.bits.data := a.bits.data
in.bits.mask := a.bits.mask
Connectable.waiveUnmatched(in.bits.extra, a.bits.echo) match {
case (lhs, rhs) => lhs :<= rhs
}
val a_extra = in.bits.extra(TLRegisterRouterExtra)
a_extra.source := a.bits.source
a_extra.size := a.bits.size
// Invoke the register map builder
val out = RegMapper(beatBytes, concurrency, undefZero, in, mapping:_*)
// No flow control needed
in.valid := a.valid
a.ready := in.ready
d.valid := out.valid
out.ready := d.ready
// We must restore the size to enable width adapters to work
val d_extra = out.bits.extra(TLRegisterRouterExtra)
d.bits := edge.AccessAck(toSource = d_extra.source, lgSize = d_extra.size)
// avoid a Mux on the data bus by manually overriding two fields
d.bits.data := out.bits.data
Connectable.waiveUnmatched(d.bits.echo, out.bits.extra) match {
case (lhs, rhs) => lhs :<= rhs
}
d.bits.opcode := Mux(out.bits.read, TLMessages.AccessAckData, TLMessages.AccessAck)
// Tie off unused channels
bundleIn.b.valid := false.B
bundleIn.c.ready := true.B
bundleIn.e.ready := true.B
genRegDescsJson(mapping:_*)
}
def genRegDescsJson(mapping: RegField.Map*): Unit = {
// Dump out the register map for documentation purposes.
val base = address.head.base
val baseHex = s"0x${base.toInt.toHexString}"
val name = s"${device.describe(ResourceBindings()).name}.At${baseHex}"
val json = GenRegDescsAnno.serialize(base, name, mapping:_*)
var suffix = 0
while( ElaborationArtefacts.contains(s"${baseHex}.${suffix}.regmap.json")) {
suffix = suffix + 1
}
ElaborationArtefacts.add(s"${baseHex}.${suffix}.regmap.json", json)
val module = Module.currentModule.get.asInstanceOf[RawModule]
GenRegDescsAnno.anno(
module,
base,
mapping:_*)
}
}
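// --------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original file): a minimal
// device that publishes two registers through TLRegisterNode.regmap. The class
// name, device string, offsets, and register names are assumptions chosen for
// illustration; the import paths are assumed to match this repository's layout.
// --------------------------------------------------------------------------
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
import freechips.rocketchip.resources.SimpleDevice
class ExampleRegDevice(base: BigInt, beatBytes: Int)(implicit p: Parameters) extends LazyModule {
  val device = new SimpleDevice("example-dev", Nil)
  val node = TLRegisterNode(Seq(AddressSet(base, 0xfff)), device, "reg/control", beatBytes = beatBytes)
  lazy val module = new LazyModuleImp(this) {
    val ctrl   = RegInit(0.U(32.W))      // software read/write control register at offset 0x0
    val status = WireDefault(0.U(32.W))  // hardware-driven value, software read-only at offset 0x4
    node.regmap(
      0x00 -> Seq(RegField(32, ctrl)),
      0x04 -> Seq(RegField.r(32, status)))
  }
}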
/** Mix HasTLControlRegMap into any subclass of RegisterRouter to gain helper functions for attaching a device control register map to TileLink.
* - The intended use case is that controlNode will diplomatically publish a SW-visible device's memory-mapped control registers.
* - Use the clock crossing helper controlXing to externally connect controlNode to a TileLink interconnect.
* - Use the mapping helper function regmap to internally fill out the space of device control registers.
*/
trait HasTLControlRegMap { this: RegisterRouter =>
protected val controlNode = TLRegisterNode(
address = address,
device = device,
deviceKey = "reg/control",
concurrency = concurrency,
beatBytes = beatBytes,
undefZero = undefZero,
executable = executable)
// Externally, this helper should be used to connect the register control port to a bus
val controlXing: TLInwardClockCrossingHelper = this.crossIn(controlNode)
// Backwards-compatibility default node accessor with no clock crossing
lazy val node: TLInwardNode = controlXing(NoCrossing)
// Internally, this function should be used to populate the control port with registers
protected def regmap(mapping: RegField.Map*): Unit = { controlNode.regmap(mapping:_*) }
}
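// --------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original file): a
// RegisterRouter-based peripheral mixing in HasTLControlRegMap, following the
// usual pattern. The name, compat string, base address, and scratch register
// are assumptions; RegisterRouterParams is assumed to live in regmapper.
// --------------------------------------------------------------------------
import org.chipsalliance.diplomacy.lazymodule.LazyModuleImp
import freechips.rocketchip.regmapper.RegisterRouterParams
class ExampleRouter(beatBytes: Int)(implicit p: Parameters)
    extends RegisterRouter(RegisterRouterParams("example", Seq("acme,example0"), 0x4000, beatBytes = beatBytes))
    with HasTLControlRegMap {
  lazy val module = new LazyModuleImp(this) {
    val scratch = RegInit(0.U(32.W))
    // regmap (from HasTLControlRegMap) exposes this register at offset 0x0 on controlNode
    regmap(0x00 -> Seq(RegField(32, scratch)))
  }
}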
File TileClockGater.scala:
package chipyard.clocking
import chisel3._
import chisel3.util._
import chisel3.experimental.Analog
import org.chipsalliance.cde.config._
import freechips.rocketchip.subsystem._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.prci._
import freechips.rocketchip.util._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.devices.tilelink._
import freechips.rocketchip.regmapper._
import freechips.rocketchip.subsystem._
/** This node adds clock-gating control registers.
 * If deploying on a platform which does not support clock gating, deasserting the enable
 * flag will still generate the registers, preserving the same memory map and behavior, but will not
 * generate any clock gaters.
 */
class TileClockGater(address: BigInt, beatBytes: Int)(implicit p: Parameters, valName: ValName) extends LazyModule
{
val device = new SimpleDevice(s"clock-gater", Nil)
val clockNode = ClockGroupIdentityNode()
val tlNode = TLRegisterNode(Seq(AddressSet(address, 4096-1)), device, "reg/control", beatBytes=beatBytes)
lazy val module = new LazyModuleImp(this) {
val sources = clockNode.in.head._1.member.data.toSeq
val sinks = clockNode.out.head._1.member.elements.toSeq
val nSinks = sinks.size
val regs = (0 until nSinks).map({i =>
val sinkName = sinks(i)._1
val reg = withReset(sources(i).reset) { Module(new AsyncResetRegVec(w=1, init=1)) }
if (sinkName.contains("tile")) {
println(s"${(address+i*4).toString(16)}: Tile $sinkName clock gate")
sinks(i)._2.clock := ClockGate(sources(i).clock, reg.io.q.asBool)
sinks(i)._2.reset := sources(i).reset
} else {
sinks(i)._2 := sources(i)
}
reg
})
tlNode.regmap((0 until nSinks).map({i =>
i*4 -> Seq(RegField.rwReg(1, regs(i).io))
}): _*)
}
}
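// Resulting software view (editor's sketch): each clock sink i gets a 1-bit read/write enable
// register at offset i*4 from `address`, reset to 1 (clock running); only sinks whose name
// contains "tile" are actually gated, the rest pass their clock through. With a hypothetical
// base address of 0x100000 and two tile sinks, the map would look like:
// {{{
//   0x100000: tile_0 clock enable (1 = running, 0 = gated)
//   0x100004: tile_1 clock enable (1 = running, 0 = gated)
// }}}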
File MuxLiteral.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.log2Ceil
import scala.reflect.ClassTag
/* MuxLiteral creates a lookup table from a key to a list of values.
* Unlike MuxLookup, the table keys must be exclusive literals.
*/
object MuxLiteral
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: (UInt, T), rest: (UInt, T)*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[(UInt, T)]): T =
MuxTable(index, default, cases.map { case (k, v) => (k.litValue, v) })
}
object MuxSeq
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: T, rest: T*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[T]): T =
MuxTable(index, default, cases.zipWithIndex.map { case (v, i) => (BigInt(i), v) })
}
object MuxTable
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: (BigInt, T), rest: (BigInt, T)*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[(BigInt, T)]): T = {
/* All keys must be >= 0 and distinct */
cases.foreach { case (k, _) => require (k >= 0) }
require (cases.map(_._1).distinct.size == cases.size)
/* Filter out any cases identical to the default */
val simple = cases.filter { case (k, v) => !default.isLit || !v.isLit || v.litValue != default.litValue }
val maxKey = (BigInt(0) +: simple.map(_._1)).max
val endIndex = BigInt(1) << log2Ceil(maxKey+1)
if (simple.isEmpty) {
default
} else if (endIndex <= 2*simple.size) {
/* The dense encoding case uses a Vec */
val table = Array.fill(endIndex.toInt) { default }
simple.foreach { case (k, v) => table(k.toInt) = v }
Mux(index >= endIndex.U, default, VecInit(table)(index))
} else {
/* The sparse encoding case uses switch */
val out = WireDefault(default)
simple.foldLeft(new chisel3.util.SwitchContext(index, None, Set.empty)) { case (acc, (k, v)) =>
acc.is (k.U) { out := v }
}
out
}
}
}
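/* Example (editor's sketch): selecting among literal-keyed alternatives with the helpers above.
 * The signal names and widths are illustrative assumptions only.
 * {{{
 * val sel = Wire(UInt(2.W))
 * // MuxLiteral: keys must be distinct literals; unmatched keys fall back to the default
 * val a = MuxLiteral(sel, 0.U(8.W), 1.U -> 10.U(8.W), 3.U -> 30.U(8.W))
 * // MuxSeq: a dense table indexed from 0
 * val b = MuxSeq(sel, 0.U(8.W), Seq(5.U(8.W), 6.U(8.W), 7.U(8.W)))
 * }}}
 */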
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning its [[AutoBundle]] and the unconnected [[Dangle]]s from this module
   * and its submodules.
   */
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
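/* Example (editor's sketch): the usual pattern for pairing a LazyModule with a LazyModuleImp.
 * The adder logic and names are illustrative assumptions only.
 * {{{
 * class ExampleLazyAdder(implicit p: Parameters) extends LazyModule {
 *   lazy val module = new LazyModuleImp(this) {
 *     val io = IO(new Bundle {
 *       val a   = Input(UInt(8.W))
 *       val b   = Input(UInt(8.W))
 *       val sum = Output(UInt(8.W))
 *     })
 *     io.sum := io.a + io.b
 *   }
 * }
 * }}}
 */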
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
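/* Example (editor's sketch): a LazyRawModuleImp that drives the default clock/reset of its lazy
 * children from explicit ports. Port names are illustrative assumptions only.
 * {{{
 * class ExampleRawDomain(implicit p: Parameters) extends LazyModule {
 *   lazy val module = new LazyRawModuleImp(this) {
 *     override def provideImplicitClockToLazyChildren: Boolean = true
 *     val io = IO(new Bundle {
 *       val clock = Input(Clock())
 *       val reset = Input(AsyncReset())
 *     })
 *     childClock := io.clock
 *     childReset := io.reset
 *   }
 * }
 * }}}
 */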
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
 *   flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesIn`, if false it corresponds to
 *   `danglesOut`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
  * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
  *   Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
  *   describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
  *   [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
  *   parameters.
  * @tparam UI
  *   Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
  *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
  * @tparam EI
  *   Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
  *   specified for a sink according to protocol.
  * @tparam BI
  *   Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
  *   It should extend [[chisel3.Data]], which represents the real hardware.
  * @tparam DO
  *   Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
  *   describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
  * @tparam UO
  *   Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
  *   the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
  *   Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
  * @tparam EO
  *   Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
  *   specified for a source according to protocol.
  * @tparam BO
  *   Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
  *   interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
  *   - line `─`: source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
   * Given counts of known inward and outward bindings and of inward and outward star bindings, return the resolved
   * inward stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
   *   An `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
  /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uoParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
   *   An `n`-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
    /** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
   * connections which need to be resolved to determine how many actual edges they correspond to. We also need
   * to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
   * edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
      // Resolving the stars depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
  /** Sequence of outward direct ports.
   *
   * This should be called after all star bindings are resolved.
   *
   * Each element is: `j` Port index of this binding in the other Node's [[iPortMapping]] on the other side of the
   * binding. `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info
   * where this connection was made in the source code.
   */
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  /** Sequence of inward direct ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
   * If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: DontCare-ing unconnected forwarded diplomatic signals here is a compatibility workaround.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: DontCare-ing unconnected forwarded diplomatic signals here is a compatibility workaround.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
  /** Create the [[Dangle]]s which describe the connections from this node's outputs to other nodes' inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
  /** Create the [[Dangle]]s which describe the connections into this node's inputs from other nodes' outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
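  /* Example (editor's sketch): typical use of `in`/`out` inside the parent LazyModule's module
   * implementation. The node and the pass-through behavior are illustrative assumptions only.
   * {{{
   * lazy val module = new LazyModuleImp(this) {
   *   val (bundleIn, edgeIn)   = node.in(0)   // negotiated inward bundle + edge parameters
   *   val (bundleOut, edgeOut) = node.out(0)  // negotiated outward bundle + edge parameters
   *   bundleOut <> bundleIn                   // e.g. a trivial pass-through adapter
   * }
   * }}}
   */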
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
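  /* Example (editor's sketch): using the accessors above on an A-channel beat of a hypothetical
   * port `tl` with edge `edge`. Names are illustrative assumptions only.
   * {{{
   * val a       = tl.a.bits
   * val putLike = edge.hasData(a)   // statically folded to a constant where possible
   * val beatRow = edge.addr_hi(a)   // address >> log2Ceil(beatBytes)
   * val bytes   = 1.U << edge.size(a)
   * }}}
   */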
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
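  /* Example (editor's sketch): tracking multi-beat messages with the helpers above on a
   * hypothetical A channel. Register names are illustrative assumptions only.
   * {{{
   * val (a_first, a_last, a_done) = edge.firstlast(tl.a)
   * when (tl.a.fire && a_first) { reqAddr := edge.address(tl.a.bits) } // latch per-request state
   * when (a_done)               { busy := false.B }                    // whole A message accepted
   * }}}
   */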
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
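  /* Example (editor's sketch): using inFlight to bound outstanding transactions on a monitored
   * TLBundle `tl`; the bound of 8 is an illustrative assumption.
   * {{{
   * val (flight, flightNext) = edge.inFlight(tl)
   * assert(flight <= 8.U, "too many outstanding TileLink transactions")
   * }}}
   */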
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
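  /* Example (editor's sketch): a client issuing a Get on the A channel with the helper above.
   * Signal and register names are illustrative assumptions only.
   * {{{
   * val (legal, aGet) = edge.Get(fromSource = 0.U, toAddress = reqAddr, lgSize = 3.U)
   * tl.a.valid := wantRead && legal
   * tl.a.bits  := aGet
   * }}}
   */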
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
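// Usage sketch (illustrative names): a manager-side responder answers an A-channel Get by
// echoing its source and size on D with the AccessAck(a: TLBundleA, data: UInt) helper above:
//   in.d.bits := edge.AccessAck(in.a.bits, readData)
// `in`, `edge` and `readData` are assumed to come from the surrounding manager logic.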
| module TileClockGater( // @[TileClockGater.scala:27:25]
input clock, // @[TileClockGater.scala:27:25]
input reset, // @[TileClockGater.scala:27:25]
output auto_clock_gater_in_1_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_clock_gater_in_1_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_clock_gater_in_1_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_clock_gater_in_1_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_clock_gater_in_1_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [11:0] auto_clock_gater_in_1_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [20:0] auto_clock_gater_in_1_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_clock_gater_in_1_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_clock_gater_in_1_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_clock_gater_in_1_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_clock_gater_in_1_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_clock_gater_in_1_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_clock_gater_in_1_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_clock_gater_in_1_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [11:0] auto_clock_gater_in_1_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_clock_gater_in_1_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_clock_gater_in_0_member_allClocks_uncore_clock, // @[LazyModuleImp.scala:107:25]
input auto_clock_gater_in_0_member_allClocks_uncore_reset, // @[LazyModuleImp.scala:107:25]
output auto_clock_gater_out_member_allClocks_uncore_clock, // @[LazyModuleImp.scala:107:25]
output auto_clock_gater_out_member_allClocks_uncore_reset // @[LazyModuleImp.scala:107:25]
);
wire out_front_valid; // @[RegisterRouter.scala:87:24]
wire out_front_ready; // @[RegisterRouter.scala:87:24]
wire out_bits_read; // @[RegisterRouter.scala:87:24]
wire [11:0] out_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:87:24]
wire [8:0] in_bits_index; // @[RegisterRouter.scala:73:18]
wire in_bits_read; // @[RegisterRouter.scala:73:18]
wire auto_clock_gater_in_1_a_valid_0 = auto_clock_gater_in_1_a_valid; // @[TileClockGater.scala:27:25]
wire [2:0] auto_clock_gater_in_1_a_bits_opcode_0 = auto_clock_gater_in_1_a_bits_opcode; // @[TileClockGater.scala:27:25]
wire [2:0] auto_clock_gater_in_1_a_bits_param_0 = auto_clock_gater_in_1_a_bits_param; // @[TileClockGater.scala:27:25]
wire [1:0] auto_clock_gater_in_1_a_bits_size_0 = auto_clock_gater_in_1_a_bits_size; // @[TileClockGater.scala:27:25]
wire [11:0] auto_clock_gater_in_1_a_bits_source_0 = auto_clock_gater_in_1_a_bits_source; // @[TileClockGater.scala:27:25]
wire [20:0] auto_clock_gater_in_1_a_bits_address_0 = auto_clock_gater_in_1_a_bits_address; // @[TileClockGater.scala:27:25]
wire [7:0] auto_clock_gater_in_1_a_bits_mask_0 = auto_clock_gater_in_1_a_bits_mask; // @[TileClockGater.scala:27:25]
wire [63:0] auto_clock_gater_in_1_a_bits_data_0 = auto_clock_gater_in_1_a_bits_data; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_in_1_a_bits_corrupt_0 = auto_clock_gater_in_1_a_bits_corrupt; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_in_1_d_ready_0 = auto_clock_gater_in_1_d_ready; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_in_0_member_allClocks_uncore_clock_0 = auto_clock_gater_in_0_member_allClocks_uncore_clock; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_in_0_member_allClocks_uncore_reset_0 = auto_clock_gater_in_0_member_allClocks_uncore_reset; // @[TileClockGater.scala:27:25]
wire [1:0] _out_frontSel_T = 2'h1; // @[OneHot.scala:58:35]
wire [1:0] _out_backSel_T = 2'h1; // @[OneHot.scala:58:35]
wire [8:0] out_maskMatch = 9'h1FF; // @[RegisterRouter.scala:87:24]
wire out_frontSel_0 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_backSel_0 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_rifireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_5 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_rifireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_wifireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_6 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_wifireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_rofireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_5 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_rofireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_wofireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_6 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_wofireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_iready = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_oready = 1'h1; // @[RegisterRouter.scala:87:24]
wire [2:0] clock_gaterIn_d_bits_d_opcode = 3'h0; // @[Edges.scala:792:17]
wire [63:0] clock_gaterIn_d_bits_d_data = 64'h0; // @[Edges.scala:792:17]
wire auto_clock_gater_in_1_d_bits_sink = 1'h0; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_in_1_d_bits_denied = 1'h0; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_in_1_d_bits_corrupt = 1'h0; // @[TileClockGater.scala:27:25]
wire clock_gaterIn_1_d_bits_sink = 1'h0; // @[MixedNode.scala:551:17]
wire clock_gaterIn_1_d_bits_denied = 1'h0; // @[MixedNode.scala:551:17]
wire clock_gaterIn_1_d_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire out_frontSel_1 = 1'h0; // @[RegisterRouter.scala:87:24]
wire out_backSel_1 = 1'h0; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_6 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_wifireMux_T_7 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_rofireMux_T_6 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_wofireMux_T_7 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_out_bits_data_T = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_out_bits_data_T_2 = 1'h0; // @[MuxLiteral.scala:49:17]
wire clock_gaterIn_d_bits_d_sink = 1'h0; // @[Edges.scala:792:17]
wire clock_gaterIn_d_bits_d_denied = 1'h0; // @[Edges.scala:792:17]
wire clock_gaterIn_d_bits_d_corrupt = 1'h0; // @[Edges.scala:792:17]
wire [1:0] auto_clock_gater_in_1_d_bits_param = 2'h0; // @[TileClockGater.scala:27:25]
wire clock_gaterIn_1_a_ready; // @[MixedNode.scala:551:17]
wire [1:0] clock_gaterIn_1_d_bits_param = 2'h0; // @[MixedNode.scala:551:17]
wire [1:0] clock_gaterIn_d_bits_d_param = 2'h0; // @[Edges.scala:792:17]
wire clock_gaterIn_1_a_valid = auto_clock_gater_in_1_a_valid_0; // @[MixedNode.scala:551:17]
wire [2:0] clock_gaterIn_1_a_bits_opcode = auto_clock_gater_in_1_a_bits_opcode_0; // @[MixedNode.scala:551:17]
wire [2:0] clock_gaterIn_1_a_bits_param = auto_clock_gater_in_1_a_bits_param_0; // @[MixedNode.scala:551:17]
wire [1:0] clock_gaterIn_1_a_bits_size = auto_clock_gater_in_1_a_bits_size_0; // @[MixedNode.scala:551:17]
wire [11:0] clock_gaterIn_1_a_bits_source = auto_clock_gater_in_1_a_bits_source_0; // @[MixedNode.scala:551:17]
wire [20:0] clock_gaterIn_1_a_bits_address = auto_clock_gater_in_1_a_bits_address_0; // @[MixedNode.scala:551:17]
wire [7:0] clock_gaterIn_1_a_bits_mask = auto_clock_gater_in_1_a_bits_mask_0; // @[MixedNode.scala:551:17]
wire [63:0] clock_gaterIn_1_a_bits_data = auto_clock_gater_in_1_a_bits_data_0; // @[MixedNode.scala:551:17]
wire clock_gaterIn_1_a_bits_corrupt = auto_clock_gater_in_1_a_bits_corrupt_0; // @[MixedNode.scala:551:17]
wire clock_gaterIn_1_d_ready = auto_clock_gater_in_1_d_ready_0; // @[MixedNode.scala:551:17]
wire clock_gaterIn_1_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] clock_gaterIn_1_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] clock_gaterIn_1_d_bits_size; // @[MixedNode.scala:551:17]
wire [11:0] clock_gaterIn_1_d_bits_source; // @[MixedNode.scala:551:17]
wire [63:0] clock_gaterIn_1_d_bits_data; // @[MixedNode.scala:551:17]
wire clock_gaterIn_member_allClocks_uncore_clock = auto_clock_gater_in_0_member_allClocks_uncore_clock_0; // @[MixedNode.scala:551:17]
wire clock_gaterOut_member_allClocks_uncore_clock; // @[MixedNode.scala:542:17]
wire clock_gaterIn_member_allClocks_uncore_reset = auto_clock_gater_in_0_member_allClocks_uncore_reset_0; // @[MixedNode.scala:551:17]
wire clock_gaterOut_member_allClocks_uncore_reset; // @[MixedNode.scala:542:17]
wire auto_clock_gater_in_1_a_ready_0; // @[TileClockGater.scala:27:25]
wire [2:0] auto_clock_gater_in_1_d_bits_opcode_0; // @[TileClockGater.scala:27:25]
wire [1:0] auto_clock_gater_in_1_d_bits_size_0; // @[TileClockGater.scala:27:25]
wire [11:0] auto_clock_gater_in_1_d_bits_source_0; // @[TileClockGater.scala:27:25]
wire [63:0] auto_clock_gater_in_1_d_bits_data_0; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_in_1_d_valid_0; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_out_member_allClocks_uncore_clock_0; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_out_member_allClocks_uncore_reset_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_out_member_allClocks_uncore_clock_0 = clock_gaterOut_member_allClocks_uncore_clock; // @[MixedNode.scala:542:17]
assign auto_clock_gater_out_member_allClocks_uncore_reset_0 = clock_gaterOut_member_allClocks_uncore_reset; // @[MixedNode.scala:542:17]
assign clock_gaterOut_member_allClocks_uncore_clock = clock_gaterIn_member_allClocks_uncore_clock; // @[MixedNode.scala:542:17, :551:17]
assign clock_gaterOut_member_allClocks_uncore_reset = clock_gaterIn_member_allClocks_uncore_reset; // @[MixedNode.scala:542:17, :551:17]
wire in_ready; // @[RegisterRouter.scala:73:18]
assign auto_clock_gater_in_1_a_ready_0 = clock_gaterIn_1_a_ready; // @[MixedNode.scala:551:17]
wire in_valid = clock_gaterIn_1_a_valid; // @[RegisterRouter.scala:73:18]
wire [1:0] in_bits_extra_tlrr_extra_size = clock_gaterIn_1_a_bits_size; // @[RegisterRouter.scala:73:18]
wire [11:0] in_bits_extra_tlrr_extra_source = clock_gaterIn_1_a_bits_source; // @[RegisterRouter.scala:73:18]
wire [7:0] in_bits_mask = clock_gaterIn_1_a_bits_mask; // @[RegisterRouter.scala:73:18]
wire [63:0] in_bits_data = clock_gaterIn_1_a_bits_data; // @[RegisterRouter.scala:73:18]
wire out_ready = clock_gaterIn_1_d_ready; // @[RegisterRouter.scala:87:24]
wire out_valid; // @[RegisterRouter.scala:87:24]
assign auto_clock_gater_in_1_d_valid_0 = clock_gaterIn_1_d_valid; // @[MixedNode.scala:551:17]
assign auto_clock_gater_in_1_d_bits_opcode_0 = clock_gaterIn_1_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] clock_gaterIn_d_bits_d_size; // @[Edges.scala:792:17]
assign auto_clock_gater_in_1_d_bits_size_0 = clock_gaterIn_1_d_bits_size; // @[MixedNode.scala:551:17]
wire [11:0] clock_gaterIn_d_bits_d_source; // @[Edges.scala:792:17]
assign auto_clock_gater_in_1_d_bits_source_0 = clock_gaterIn_1_d_bits_source; // @[MixedNode.scala:551:17]
wire [63:0] out_bits_data; // @[RegisterRouter.scala:87:24]
assign auto_clock_gater_in_1_d_bits_data_0 = clock_gaterIn_1_d_bits_data; // @[MixedNode.scala:551:17]
wire _out_in_ready_T; // @[RegisterRouter.scala:87:24]
assign clock_gaterIn_1_a_ready = in_ready; // @[RegisterRouter.scala:73:18]
wire _in_bits_read_T; // @[RegisterRouter.scala:74:36]
wire _out_front_valid_T = in_valid; // @[RegisterRouter.scala:73:18, :87:24]
wire out_front_bits_read = in_bits_read; // @[RegisterRouter.scala:73:18, :87:24]
wire [8:0] out_front_bits_index = in_bits_index; // @[RegisterRouter.scala:73:18, :87:24]
wire [63:0] out_front_bits_data = in_bits_data; // @[RegisterRouter.scala:73:18, :87:24]
wire [7:0] out_front_bits_mask = in_bits_mask; // @[RegisterRouter.scala:73:18, :87:24]
wire [11:0] out_front_bits_extra_tlrr_extra_source = in_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:73:18, :87:24]
wire [1:0] out_front_bits_extra_tlrr_extra_size = in_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:73:18, :87:24]
assign _in_bits_read_T = clock_gaterIn_1_a_bits_opcode == 3'h4; // @[RegisterRouter.scala:74:36]
assign in_bits_read = _in_bits_read_T; // @[RegisterRouter.scala:73:18, :74:36]
wire [17:0] _in_bits_index_T = clock_gaterIn_1_a_bits_address[20:3]; // @[Edges.scala:192:34]
assign in_bits_index = _in_bits_index_T[8:0]; // @[RegisterRouter.scala:73:18, :75:19]
wire _out_front_ready_T = out_ready; // @[RegisterRouter.scala:87:24]
wire _out_out_valid_T; // @[RegisterRouter.scala:87:24]
assign clock_gaterIn_1_d_valid = out_valid; // @[RegisterRouter.scala:87:24]
wire _clock_gaterIn_d_bits_opcode_T = out_bits_read; // @[RegisterRouter.scala:87:24, :105:25]
assign clock_gaterIn_1_d_bits_data = out_bits_data; // @[RegisterRouter.scala:87:24]
assign clock_gaterIn_d_bits_d_source = out_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:87:24]
wire [1:0] out_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:87:24]
assign clock_gaterIn_d_bits_d_size = out_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:87:24]
assign _out_in_ready_T = out_front_ready; // @[RegisterRouter.scala:87:24]
assign _out_out_valid_T = out_front_valid; // @[RegisterRouter.scala:87:24]
assign out_bits_read = out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire [8:0] out_findex = out_front_bits_index; // @[RegisterRouter.scala:87:24]
wire [8:0] out_bindex = out_front_bits_index; // @[RegisterRouter.scala:87:24]
assign out_bits_extra_tlrr_extra_source = out_front_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:87:24]
assign out_bits_extra_tlrr_extra_size = out_front_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:87:24]
wire _out_T = out_findex == 9'h0; // @[RegisterRouter.scala:87:24]
wire _out_T_1 = out_bindex == 9'h0; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_out_bits_data_WIRE_0 = _out_T_1; // @[MuxLiteral.scala:49:48]
wire out_rivalid_0; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_4; // @[RegisterRouter.scala:87:24]
wire out_wivalid_0; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_3; // @[RegisterRouter.scala:87:24]
wire out_roready_0; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_4; // @[RegisterRouter.scala:87:24]
wire out_woready_0; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T = out_front_bits_mask[0]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T = out_front_bits_mask[0]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_1 = out_front_bits_mask[1]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_1 = out_front_bits_mask[1]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_2 = out_front_bits_mask[2]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_2 = out_front_bits_mask[2]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_3 = out_front_bits_mask[3]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_3 = out_front_bits_mask[3]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_4 = out_front_bits_mask[4]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_4 = out_front_bits_mask[4]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_5 = out_front_bits_mask[5]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_5 = out_front_bits_mask[5]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_6 = out_front_bits_mask[6]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_6 = out_front_bits_mask[6]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_7 = out_front_bits_mask[7]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_7 = out_front_bits_mask[7]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_8 = {8{_out_frontMask_T}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_9 = {8{_out_frontMask_T_1}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_10 = {8{_out_frontMask_T_2}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_11 = {8{_out_frontMask_T_3}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_12 = {8{_out_frontMask_T_4}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_13 = {8{_out_frontMask_T_5}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_14 = {8{_out_frontMask_T_6}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_15 = {8{_out_frontMask_T_7}}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_lo_lo = {_out_frontMask_T_9, _out_frontMask_T_8}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_lo_hi = {_out_frontMask_T_11, _out_frontMask_T_10}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_frontMask_lo = {out_frontMask_lo_hi, out_frontMask_lo_lo}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_hi_lo = {_out_frontMask_T_13, _out_frontMask_T_12}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_hi_hi = {_out_frontMask_T_15, _out_frontMask_T_14}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_frontMask_hi = {out_frontMask_hi_hi, out_frontMask_hi_lo}; // @[RegisterRouter.scala:87:24]
wire [63:0] out_frontMask = {out_frontMask_hi, out_frontMask_lo}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_8 = {8{_out_backMask_T}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_9 = {8{_out_backMask_T_1}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_10 = {8{_out_backMask_T_2}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_11 = {8{_out_backMask_T_3}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_12 = {8{_out_backMask_T_4}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_13 = {8{_out_backMask_T_5}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_14 = {8{_out_backMask_T_6}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_15 = {8{_out_backMask_T_7}}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_lo_lo = {_out_backMask_T_9, _out_backMask_T_8}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_lo_hi = {_out_backMask_T_11, _out_backMask_T_10}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_backMask_lo = {out_backMask_lo_hi, out_backMask_lo_lo}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_hi_lo = {_out_backMask_T_13, _out_backMask_T_12}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_hi_hi = {_out_backMask_T_15, _out_backMask_T_14}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_backMask_hi = {out_backMask_hi_hi, out_backMask_hi_lo}; // @[RegisterRouter.scala:87:24]
wire [63:0] out_backMask = {out_backMask_hi, out_backMask_lo}; // @[RegisterRouter.scala:87:24]
wire _out_rimask_T = out_frontMask[0]; // @[RegisterRouter.scala:87:24]
wire _out_wimask_T = out_frontMask[0]; // @[RegisterRouter.scala:87:24]
wire out_rimask = _out_rimask_T; // @[RegisterRouter.scala:87:24]
wire out_wimask = _out_wimask_T; // @[RegisterRouter.scala:87:24]
wire _out_romask_T = out_backMask[0]; // @[RegisterRouter.scala:87:24]
wire _out_womask_T = out_backMask[0]; // @[RegisterRouter.scala:87:24]
wire out_romask = _out_romask_T; // @[RegisterRouter.scala:87:24]
wire out_womask = _out_womask_T; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid = out_rivalid_0 & out_rimask; // @[RegisterRouter.scala:87:24]
wire out_f_roready = out_roready_0 & out_romask; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid = out_wivalid_0 & out_wimask; // @[RegisterRouter.scala:87:24]
wire out_f_woready = out_woready_0 & out_womask; // @[RegisterRouter.scala:87:24]
wire _out_T_2 = out_front_bits_data[0]; // @[RegisterRouter.scala:87:24]
wire _out_T_3 = ~out_rimask; // @[RegisterRouter.scala:87:24]
wire _out_T_4 = ~out_wimask; // @[RegisterRouter.scala:87:24]
wire _out_T_5 = ~out_romask; // @[RegisterRouter.scala:87:24]
wire _out_T_6 = ~out_womask; // @[RegisterRouter.scala:87:24]
wire _out_T_7; // @[RegisterRouter.scala:87:24]
wire _out_T_8 = _out_T_7; // @[RegisterRouter.scala:87:24]
wire _out_out_bits_data_WIRE_1_0 = _out_T_8; // @[MuxLiteral.scala:49:48]
wire _GEN = in_valid & out_front_ready; // @[RegisterRouter.scala:73:18, :87:24]
wire _out_rifireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_rifireMux_T = _GEN; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_wifireMux_T = _GEN; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_1 = _out_rifireMux_T & out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_2 = _out_rifireMux_T_1; // @[RegisterRouter.scala:87:24]
assign _out_rifireMux_T_3 = _out_rifireMux_T_2 & _out_T; // @[RegisterRouter.scala:87:24]
assign out_rivalid_0 = _out_rifireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_4 = ~_out_T; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_1 = ~out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_2 = _out_wifireMux_T & _out_wifireMux_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_3 = _out_wifireMux_T_2; // @[RegisterRouter.scala:87:24]
assign _out_wifireMux_T_4 = _out_wifireMux_T_3 & _out_T; // @[RegisterRouter.scala:87:24]
assign out_wivalid_0 = _out_wifireMux_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_5 = ~_out_T; // @[RegisterRouter.scala:87:24]
wire _GEN_0 = out_front_valid & out_ready; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_rofireMux_T = _GEN_0; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_wofireMux_T = _GEN_0; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_1 = _out_rofireMux_T & out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_2 = _out_rofireMux_T_1; // @[RegisterRouter.scala:87:24]
assign _out_rofireMux_T_3 = _out_rofireMux_T_2 & _out_T_1; // @[RegisterRouter.scala:87:24]
assign out_roready_0 = _out_rofireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_4 = ~_out_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_1 = ~out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_2 = _out_wofireMux_T & _out_wofireMux_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_3 = _out_wofireMux_T_2; // @[RegisterRouter.scala:87:24]
assign _out_wofireMux_T_4 = _out_wofireMux_T_3 & _out_T_1; // @[RegisterRouter.scala:87:24]
assign out_woready_0 = _out_wofireMux_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_5 = ~_out_T_1; // @[RegisterRouter.scala:87:24]
assign in_ready = _out_in_ready_T; // @[RegisterRouter.scala:73:18, :87:24]
assign out_front_valid = _out_front_valid_T; // @[RegisterRouter.scala:87:24]
assign out_front_ready = _out_front_ready_T; // @[RegisterRouter.scala:87:24]
assign out_valid = _out_out_valid_T; // @[RegisterRouter.scala:87:24]
wire _out_out_bits_data_T_1 = _out_out_bits_data_WIRE_0; // @[MuxLiteral.scala:49:{10,48}]
wire _out_out_bits_data_T_3 = _out_out_bits_data_WIRE_1_0; // @[MuxLiteral.scala:49:{10,48}]
wire _out_out_bits_data_T_4 = _out_out_bits_data_T_1 & _out_out_bits_data_T_3; // @[MuxLiteral.scala:49:10]
assign out_bits_data = {63'h0, _out_out_bits_data_T_4}; // @[RegisterRouter.scala:87:24]
assign clock_gaterIn_1_d_bits_size = clock_gaterIn_d_bits_d_size; // @[Edges.scala:792:17]
assign clock_gaterIn_1_d_bits_source = clock_gaterIn_d_bits_d_source; // @[Edges.scala:792:17]
assign clock_gaterIn_1_d_bits_opcode = {2'h0, _clock_gaterIn_d_bits_opcode_T}; // @[RegisterRouter.scala:105:{19,25}]
TLMonitor_64 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (clock_gaterIn_1_a_ready), // @[MixedNode.scala:551:17]
.io_in_a_valid (clock_gaterIn_1_a_valid), // @[MixedNode.scala:551:17]
.io_in_a_bits_opcode (clock_gaterIn_1_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_a_bits_param (clock_gaterIn_1_a_bits_param), // @[MixedNode.scala:551:17]
.io_in_a_bits_size (clock_gaterIn_1_a_bits_size), // @[MixedNode.scala:551:17]
.io_in_a_bits_source (clock_gaterIn_1_a_bits_source), // @[MixedNode.scala:551:17]
.io_in_a_bits_address (clock_gaterIn_1_a_bits_address), // @[MixedNode.scala:551:17]
.io_in_a_bits_mask (clock_gaterIn_1_a_bits_mask), // @[MixedNode.scala:551:17]
.io_in_a_bits_data (clock_gaterIn_1_a_bits_data), // @[MixedNode.scala:551:17]
.io_in_a_bits_corrupt (clock_gaterIn_1_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_d_ready (clock_gaterIn_1_d_ready), // @[MixedNode.scala:551:17]
.io_in_d_valid (clock_gaterIn_1_d_valid), // @[MixedNode.scala:551:17]
.io_in_d_bits_opcode (clock_gaterIn_1_d_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_d_bits_size (clock_gaterIn_1_d_bits_size), // @[MixedNode.scala:551:17]
.io_in_d_bits_source (clock_gaterIn_1_d_bits_source), // @[MixedNode.scala:551:17]
.io_in_d_bits_data (clock_gaterIn_1_d_bits_data) // @[MixedNode.scala:551:17]
); // @[Nodes.scala:27:25]
AsyncResetRegVec_w1_i1 regs_0 ( // @[TileClockGater.scala:33:53]
.clock (clock),
.reset (clock_gaterIn_member_allClocks_uncore_reset), // @[MixedNode.scala:551:17]
.io_d (_out_T_2), // @[RegisterRouter.scala:87:24]
.io_q (_out_T_7),
.io_en (out_f_woready) // @[RegisterRouter.scala:87:24]
); // @[TileClockGater.scala:33:53]
assign auto_clock_gater_in_1_a_ready = auto_clock_gater_in_1_a_ready_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_in_1_d_valid = auto_clock_gater_in_1_d_valid_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_in_1_d_bits_opcode = auto_clock_gater_in_1_d_bits_opcode_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_in_1_d_bits_size = auto_clock_gater_in_1_d_bits_size_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_in_1_d_bits_source = auto_clock_gater_in_1_d_bits_source_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_in_1_d_bits_data = auto_clock_gater_in_1_d_bits_data_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_out_member_allClocks_uncore_clock = auto_clock_gater_out_member_allClocks_uncore_clock_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_out_member_allClocks_uncore_reset = auto_clock_gater_out_member_allClocks_uncore_reset_0; // @[TileClockGater.scala:27:25]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File tage.scala:
package boom.v4.ifu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import boom.v4.common._
import boom.v4.util.{BoomCoreStringPrefix, MaskLower, WrapInc}
import scala.math.min
class TageResp extends Bundle {
val ctr = UInt(3.W)
val u = UInt(2.W)
}
class TageTable(val nRows: Int, val tagSz: Int, val histLength: Int, val uBitPeriod: Int, val singlePorted: Boolean)
(implicit p: Parameters) extends BoomModule()(p)
with HasBoomFrontendParameters
{
require(histLength <= globalHistoryLength)
val nWrBypassEntries = 2
val io = IO( new Bundle {
val f1_req_valid = Input(Bool())
val f1_req_pc = Input(UInt(vaddrBitsExtended.W))
val f1_req_ghist = Input(UInt(globalHistoryLength.W))
val f2_resp = Output(Vec(bankWidth, Valid(new TageResp)))
val update_mask = Input(Vec(bankWidth, Bool()))
val update_taken = Input(Vec(bankWidth, Bool()))
val update_alloc = Input(Vec(bankWidth, Bool()))
val update_old_ctr = Input(Vec(bankWidth, UInt(3.W)))
val update_pc = Input(UInt())
val update_hist = Input(UInt())
val update_u_mask = Input(Vec(bankWidth, Bool()))
val update_u = Input(Vec(bankWidth, UInt(2.W)))
})
def compute_folded_hist(hist: UInt, l: Int) = {
val nChunks = (histLength + l - 1) / l
val hist_chunks = (0 until nChunks) map {i =>
hist(min((i+1)*l, histLength)-1, i*l)
}
hist_chunks.reduce(_^_)
}
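  // For example, with histLength = 16 and l = log2Ceil(256) = 8 (the 256-row tables below),
  // nChunks = (16 + 8 - 1) / 8 = 2 and the fold is just hist(7,0) ^ hist(15,8); longer
  // histories simply XOR more l-bit chunks together.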
def compute_tag_and_hash(unhashed_idx: UInt, hist: UInt) = {
val idx_history = compute_folded_hist(hist, log2Ceil(nRows))
val idx = (unhashed_idx ^ idx_history)(log2Ceil(nRows)-1,0)
val tag_history = compute_folded_hist(hist, tagSz)
val tag = ((unhashed_idx >> log2Ceil(nRows)) ^ tag_history)(tagSz-1,0)
(idx, tag)
}
def inc_ctr(ctr: UInt, taken: Bool): UInt = {
Mux(!taken, Mux(ctr === 0.U, 0.U, ctr - 1.U),
Mux(ctr === 7.U, 7.U, ctr + 1.U))
}
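  // inc_ctr is a 3-bit saturating counter: (ctr = 7, taken) stays at 7, (ctr = 0, not taken)
  // stays at 0, and every other case moves one step toward the taken (7) or not-taken (0) end.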
val doing_reset = RegInit(true.B)
val reset_idx = RegInit(0.U(log2Ceil(nRows).W))
reset_idx := reset_idx + doing_reset
when (reset_idx === (nRows-1).U) { doing_reset := false.B }
class TageEntry extends Bundle {
val valid = Bool() // TODO: Remove this valid bit
val tag = UInt(tagSz.W)
val ctr = UInt(3.W)
}
val tageEntrySz = 1 + tagSz + 3
val (s1_hashed_idx, s1_tag) = compute_tag_and_hash(fetchIdx(io.f1_req_pc), io.f1_req_ghist)
val us = SyncReadMem(nRows, Vec(bankWidth*2, Bool()))
val table = SyncReadMem(nRows, Vec(bankWidth, UInt(tageEntrySz.W)))
us.suggestName(s"tage_u_${histLength}")
table.suggestName(s"tage_table_${histLength}")
val mems = Seq((f"tage_l$histLength", nRows, bankWidth * tageEntrySz))
val s2_tag = RegNext(s1_tag)
val s2_req_rtage = Wire(Vec(bankWidth, new TageEntry))
val s2_req_rus = Wire(Vec(bankWidth*2, Bool()))
val s2_req_rhits = VecInit(s2_req_rtage.map(e => e.valid && e.tag === s2_tag && !doing_reset))
for (w <- 0 until bankWidth) {
// This bit indicates the TAGE table matched here
io.f2_resp(w).valid := s2_req_rhits(w)
io.f2_resp(w).bits.u := Cat(s2_req_rus(w*2+1), s2_req_rus(w*2))
io.f2_resp(w).bits.ctr := s2_req_rtage(w).ctr
}
val clear_u_ctr = RegInit(0.U((log2Ceil(uBitPeriod) + log2Ceil(nRows) + 1).W))
when (doing_reset) { clear_u_ctr := 1.U } .otherwise { clear_u_ctr := clear_u_ctr + 1.U }
val doing_clear_u = clear_u_ctr(log2Ceil(uBitPeriod)-1,0) === 0.U
val clear_u_hi = clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 1.U
val clear_u_lo = clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 0.U
val clear_u_idx = clear_u_ctr >> log2Ceil(uBitPeriod)
val clear_u_mask = VecInit((0 until bankWidth*2) map { i => if (i % 2 == 0) clear_u_lo else clear_u_hi }).asUInt
val (update_idx, update_tag) = compute_tag_and_hash(fetchIdx(io.update_pc), io.update_hist)
val update_wdata = Wire(Vec(bankWidth, new TageEntry))
val wen = WireInit(doing_reset || io.update_mask.reduce(_||_))
val rdata = if (singlePorted) table.read(s1_hashed_idx, !wen && io.f1_req_valid) else table.read(s1_hashed_idx, io.f1_req_valid)
when (RegNext(wen) && singlePorted.B) {
s2_req_rtage := 0.U.asTypeOf(Vec(bankWidth, new TageEntry))
} .otherwise {
s2_req_rtage := VecInit(rdata.map(_.asTypeOf(new TageEntry)))
}
when (wen) {
val widx = Mux(doing_reset, reset_idx, update_idx)
val wdata = Mux(doing_reset, VecInit(Seq.fill(bankWidth) { 0.U(tageEntrySz.W) }), VecInit(update_wdata.map(_.asUInt)))
val wmask = Mux(doing_reset, ~(0.U(bankWidth.W)), io.update_mask.asUInt)
table.write(widx, wdata, wmask.asBools)
}
val update_u_mask = VecInit((0 until bankWidth*2) map {i => io.update_u_mask(i / 2)})
val update_u_wen = WireInit(doing_reset || doing_clear_u || update_u_mask.reduce(_||_))
val u_rdata = if (singlePorted) {
us.read(s1_hashed_idx, !update_u_wen && io.f1_req_valid)
} else {
us.read(s1_hashed_idx, io.f1_req_valid)
}
s2_req_rus := u_rdata
when (update_u_wen) {
val widx = Mux(doing_reset, reset_idx, Mux(doing_clear_u, clear_u_idx, update_idx))
val wdata = Mux(doing_reset || doing_clear_u, VecInit(0.U((bankWidth*2).W).asBools), VecInit(io.update_u.asUInt.asBools))
val wmask = Mux(doing_reset, ~(0.U((bankWidth*2).W)), Mux(doing_clear_u, clear_u_mask, update_u_mask.asUInt))
us.write(widx, wdata, wmask.asBools)
}
val wrbypass_tags = Reg(Vec(nWrBypassEntries, UInt(tagSz.W)))
val wrbypass_idxs = Reg(Vec(nWrBypassEntries, UInt(log2Ceil(nRows).W)))
val wrbypass = Reg(Vec(nWrBypassEntries, Vec(bankWidth, UInt(3.W))))
val wrbypass_enq_idx = RegInit(0.U(log2Ceil(nWrBypassEntries).W))
val wrbypass_hits = VecInit((0 until nWrBypassEntries) map { i =>
!doing_reset &&
wrbypass_tags(i) === update_tag &&
wrbypass_idxs(i) === update_idx
})
val wrbypass_hit = wrbypass_hits.reduce(_||_)
val wrbypass_hit_idx = PriorityEncoder(wrbypass_hits)
for (w <- 0 until bankWidth) {
update_wdata(w).ctr := Mux(io.update_alloc(w),
Mux(io.update_taken(w), 4.U,
3.U
),
Mux(wrbypass_hit, inc_ctr(wrbypass(wrbypass_hit_idx)(w), io.update_taken(w)),
inc_ctr(io.update_old_ctr(w), io.update_taken(w))
)
)
update_wdata(w).valid := true.B
update_wdata(w).tag := update_tag
}
when (io.update_mask.reduce(_||_)) {
when (wrbypass_hits.reduce(_||_)) {
wrbypass(wrbypass_hit_idx) := VecInit(update_wdata.map(_.ctr))
} .otherwise {
wrbypass (wrbypass_enq_idx) := VecInit(update_wdata.map(_.ctr))
wrbypass_tags(wrbypass_enq_idx) := update_tag
wrbypass_idxs(wrbypass_enq_idx) := update_idx
wrbypass_enq_idx := WrapInc(wrbypass_enq_idx, nWrBypassEntries)
}
}
}
case class BoomTageParams(
// nSets, histLen, tagSz
tableInfo: Seq[Tuple3[Int, Int, Int]] = Seq(( 128, 2, 7),
( 128, 4, 7),
( 256, 8, 8),
( 256, 16, 8),
( 128, 32, 9),
( 128, 64, 9)),
uBitPeriod: Int = 2048,
singlePorted: Boolean = false
)
class TageBranchPredictorBank(params: BoomTageParams = BoomTageParams())(implicit p: Parameters) extends BranchPredictorBank()(p)
{
val tageUBitPeriod = params.uBitPeriod
val tageNTables = params.tableInfo.size
class TageMeta extends Bundle
{
val provider = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W)))
val alt_differs = Vec(bankWidth, Output(Bool()))
val provider_u = Vec(bankWidth, Output(UInt(2.W)))
val provider_ctr = Vec(bankWidth, Output(UInt(3.W)))
val allocate = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W)))
}
val f3_meta = Wire(new TageMeta)
override val metaSz = f3_meta.asUInt.getWidth
require(metaSz <= bpdMaxMetaLength)
def inc_u(u: UInt, alt_differs: Bool, mispredict: Bool): UInt = {
Mux(!alt_differs, u,
Mux(mispredict, Mux(u === 0.U, 0.U, u - 1.U),
Mux(u === 3.U, 3.U, u + 1.U)))
}
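  // inc_u is the 2-bit usefulness update: the counter only moves when the provider and
  // alternate predictions differ, saturating down toward 0 on a mispredict and up toward 3
  // on a correct prediction; e.g. inc_u(2.U, true.B, false.B) gives 3.U.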
val tt = params.tableInfo map {
case (n, l, s) => {
val t = Module(new TageTable(n, s, l, params.uBitPeriod, params.singlePorted))
t.io.f1_req_valid := RegNext(io.f0_valid)
t.io.f1_req_pc := RegNext(bankAlign(io.f0_pc))
t.io.f1_req_ghist := io.f1_ghist
(t, t.mems)
}
}
val tables = tt.map(_._1)
val mems = tt.map(_._2).flatten
val f2_resps = VecInit(tables.map(_.io.f2_resp))
val f3_resps = RegNext(f2_resps)
val s1_update_meta = s1_update.bits.meta.asTypeOf(new TageMeta)
val s1_update_mispredict_mask = UIntToOH(s1_update.bits.cfi_idx.bits) &
Fill(bankWidth, s1_update.bits.cfi_mispredicted)
val s1_update_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, Bool()))))
val s1_update_u_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, UInt(1.W)))))
val s1_update_taken = Wire(Vec(tageNTables, Vec(bankWidth, Bool())))
val s1_update_old_ctr = Wire(Vec(tageNTables, Vec(bankWidth, UInt(3.W))))
val s1_update_alloc = Wire(Vec(tageNTables, Vec(bankWidth, Bool())))
val s1_update_u = Wire(Vec(tageNTables, Vec(bankWidth, UInt(2.W))))
s1_update_taken := DontCare
s1_update_old_ctr := DontCare
s1_update_alloc := DontCare
s1_update_u := DontCare
for (w <- 0 until bankWidth) {
var s2_provided = false.B
var s2_provider = 0.U
var s2_alt_provided = false.B
var s2_alt_provider = 0.U
for (i <- 0 until tageNTables) {
val hit = f2_resps(i)(w).valid
s2_alt_provided = s2_alt_provided || (s2_provided && hit)
s2_provided = s2_provided || hit
s2_alt_provider = Mux(hit, s2_provider, s2_alt_provider)
s2_provider = Mux(hit, i.U, s2_provider)
}
val s3_provided = RegNext(s2_provided)
val s3_provider = RegNext(s2_provider)
val s3_alt_provided = RegNext(s2_alt_provided)
val s3_alt_provider = RegNext(s2_alt_provider)
val prov = RegNext(f2_resps(s2_provider)(w).bits)
val alt = RegNext(f2_resps(s2_alt_provider)(w).bits)
io.resp.f3(w).taken := Mux(s3_provided,
Mux(prov.ctr === 3.U || prov.ctr === 4.U,
Mux(s3_alt_provided, alt.ctr(2), io.resp_in(0).f3(w).taken),
prov.ctr(2)),
io.resp_in(0).f3(w).taken
)
f3_meta.provider(w).valid := s3_provided
f3_meta.provider(w).bits := s3_provider
f3_meta.alt_differs(w) := s3_alt_provided && alt.ctr(2) =/= io.resp.f3(w).taken
f3_meta.provider_u(w) := prov.u
f3_meta.provider_ctr(w) := prov.ctr
// Create a mask of tables which did not hit our query, and also contain useless entries
// and also uses a longer history than the provider
val allocatable_slots = (
VecInit(f3_resps.map(r => !r(w).valid && r(w).bits.u === 0.U)).asUInt &
~(MaskLower(UIntToOH(f3_meta.provider(w).bits)) & Fill(tageNTables, f3_meta.provider(w).valid))
)
val alloc_lfsr = random.LFSR(tageNTables max 2)
val first_entry = PriorityEncoder(allocatable_slots)
val masked_entry = PriorityEncoder(allocatable_slots & alloc_lfsr)
val alloc_entry = Mux(allocatable_slots(masked_entry),
masked_entry,
first_entry)
f3_meta.allocate(w).valid := allocatable_slots =/= 0.U
f3_meta.allocate(w).bits := alloc_entry
val update_was_taken = (s1_update.bits.cfi_idx.valid &&
(s1_update.bits.cfi_idx.bits === w.U) &&
s1_update.bits.cfi_taken)
when (s1_update.bits.br_mask(w) && s1_update.valid && s1_update.bits.is_commit_update) {
when (s1_update_meta.provider(w).valid) {
val provider = s1_update_meta.provider(w).bits
s1_update_mask(provider)(w) := true.B
s1_update_u_mask(provider)(w) := true.B
val new_u = inc_u(s1_update_meta.provider_u(w),
s1_update_meta.alt_differs(w),
s1_update_mispredict_mask(w))
s1_update_u (provider)(w) := new_u
s1_update_taken (provider)(w) := update_was_taken
s1_update_old_ctr(provider)(w) := s1_update_meta.provider_ctr(w)
s1_update_alloc (provider)(w) := false.B
}
}
}
when (s1_update.valid && s1_update.bits.is_commit_update && s1_update.bits.cfi_mispredicted && s1_update.bits.cfi_idx.valid) {
val idx = s1_update.bits.cfi_idx.bits
val allocate = s1_update_meta.allocate(idx)
when (allocate.valid) {
s1_update_mask (allocate.bits)(idx) := true.B
s1_update_taken(allocate.bits)(idx) := s1_update.bits.cfi_taken
s1_update_alloc(allocate.bits)(idx) := true.B
s1_update_u_mask(allocate.bits)(idx) := true.B
s1_update_u (allocate.bits)(idx) := 0.U
} .otherwise {
val provider = s1_update_meta.provider(idx)
val decr_mask = Mux(provider.valid, ~MaskLower(UIntToOH(provider.bits)), 0.U)
for (i <- 0 until tageNTables) {
when (decr_mask(i)) {
s1_update_u_mask(i)(idx) := true.B
s1_update_u (i)(idx) := 0.U
}
}
}
}
for (i <- 0 until tageNTables) {
for (w <- 0 until bankWidth) {
tables(i).io.update_mask(w) := RegNext(s1_update_mask(i)(w))
tables(i).io.update_taken(w) := RegNext(s1_update_taken(i)(w))
tables(i).io.update_alloc(w) := RegNext(s1_update_alloc(i)(w))
tables(i).io.update_old_ctr(w) := RegNext(s1_update_old_ctr(i)(w))
tables(i).io.update_u_mask(w) := RegNext(s1_update_u_mask(i)(w))
tables(i).io.update_u(w) := RegNext(s1_update_u(i)(w))
}
tables(i).io.update_pc := RegNext(s1_update.bits.pc)
tables(i).io.update_hist := RegNext(s1_update.bits.ghist)
}
//io.f3_meta := Cat(f3_meta.asUInt, micro.io.f3_meta(micro.metaSz-1,0), base.io.f3_meta(base.metaSz-1, 0))
io.f3_meta := f3_meta.asUInt
}
| module tage_table_2_0( // @[tage.scala:90:27]
input [6:0] R0_addr,
input R0_en,
input R0_clk,
output [43:0] R0_data,
input [6:0] W0_addr,
input W0_en,
input W0_clk,
input [43:0] W0_data,
input [3:0] W0_mask
);
tage_table_2_ext tage_table_2_ext ( // @[tage.scala:90:27]
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data),
.W0_mask (W0_mask)
); // @[tage.scala:90:27]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Repeater.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{Decoupled, DecoupledIO}
// A Repeater passes its input to its output, unless repeat is asserted.
// When repeat is asserted, the Repeater copies the input and repeats it next cycle.
class Repeater[T <: Data](gen: T) extends Module
{
override def desiredName = s"Repeater_${gen.typeName}"
val io = IO( new Bundle {
val repeat = Input(Bool())
val full = Output(Bool())
val enq = Flipped(Decoupled(gen.cloneType))
val deq = Decoupled(gen.cloneType)
} )
val full = RegInit(false.B)
val saved = Reg(gen.cloneType)
// When !full, a repeater is pass-through
io.deq.valid := io.enq.valid || full
io.enq.ready := io.deq.ready && !full
io.deq.bits := Mux(full, saved, io.enq.bits)
io.full := full
when (io.enq.fire && io.repeat) { full := true.B; saved := io.enq.bits }
when (io.deq.fire && !io.repeat) { full := false.B }
}
object Repeater
{
def apply[T <: Data](enq: DecoupledIO[T], repeat: Bool): DecoupledIO[T] = {
val repeater = Module(new Repeater(chiselTypeOf(enq.bits)))
repeater.io.repeat := repeat
repeater.io.enq <> enq
repeater.io.deq
}
}
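// Usage sketch (illustrative names): the companion object instantiates a repeater inline:
//   val deq = Repeater(enqIO, needsRepeat)
// A beat accepted while `needsRepeat` is high is saved and re-presented on `deq` until a beat
// is finally dequeued with `needsRepeat` low; `enqIO` and `needsRepeat` are caller-defined.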
| module Repeater_TLBundleA_a29d128s8k1z4u( // @[Repeater.scala:10:7]
input clock, // @[Repeater.scala:10:7]
input reset, // @[Repeater.scala:10:7]
input io_repeat, // @[Repeater.scala:13:14]
output io_enq_ready, // @[Repeater.scala:13:14]
input io_enq_valid, // @[Repeater.scala:13:14]
input [2:0] io_enq_bits_opcode, // @[Repeater.scala:13:14]
input [2:0] io_enq_bits_param, // @[Repeater.scala:13:14]
input [3:0] io_enq_bits_size, // @[Repeater.scala:13:14]
input [7:0] io_enq_bits_source, // @[Repeater.scala:13:14]
input [28:0] io_enq_bits_address, // @[Repeater.scala:13:14]
input [15:0] io_enq_bits_mask, // @[Repeater.scala:13:14]
input [127:0] io_enq_bits_data, // @[Repeater.scala:13:14]
input io_enq_bits_corrupt, // @[Repeater.scala:13:14]
input io_deq_ready, // @[Repeater.scala:13:14]
output io_deq_valid, // @[Repeater.scala:13:14]
output [2:0] io_deq_bits_opcode, // @[Repeater.scala:13:14]
output [2:0] io_deq_bits_param, // @[Repeater.scala:13:14]
output [3:0] io_deq_bits_size, // @[Repeater.scala:13:14]
output [7:0] io_deq_bits_source, // @[Repeater.scala:13:14]
output [28:0] io_deq_bits_address, // @[Repeater.scala:13:14]
output [15:0] io_deq_bits_mask, // @[Repeater.scala:13:14]
output [127:0] io_deq_bits_data, // @[Repeater.scala:13:14]
output io_deq_bits_corrupt // @[Repeater.scala:13:14]
);
wire io_repeat_0 = io_repeat; // @[Repeater.scala:10:7]
wire io_enq_valid_0 = io_enq_valid; // @[Repeater.scala:10:7]
wire [2:0] io_enq_bits_opcode_0 = io_enq_bits_opcode; // @[Repeater.scala:10:7]
wire [2:0] io_enq_bits_param_0 = io_enq_bits_param; // @[Repeater.scala:10:7]
wire [3:0] io_enq_bits_size_0 = io_enq_bits_size; // @[Repeater.scala:10:7]
wire [7:0] io_enq_bits_source_0 = io_enq_bits_source; // @[Repeater.scala:10:7]
wire [28:0] io_enq_bits_address_0 = io_enq_bits_address; // @[Repeater.scala:10:7]
wire [15:0] io_enq_bits_mask_0 = io_enq_bits_mask; // @[Repeater.scala:10:7]
wire [127:0] io_enq_bits_data_0 = io_enq_bits_data; // @[Repeater.scala:10:7]
wire io_enq_bits_corrupt_0 = io_enq_bits_corrupt; // @[Repeater.scala:10:7]
wire io_deq_ready_0 = io_deq_ready; // @[Repeater.scala:10:7]
wire _io_enq_ready_T_1; // @[Repeater.scala:25:32]
wire _io_deq_valid_T; // @[Repeater.scala:24:32]
wire [2:0] _io_deq_bits_T_opcode; // @[Repeater.scala:26:21]
wire [2:0] _io_deq_bits_T_param; // @[Repeater.scala:26:21]
wire [3:0] _io_deq_bits_T_size; // @[Repeater.scala:26:21]
wire [7:0] _io_deq_bits_T_source; // @[Repeater.scala:26:21]
wire [28:0] _io_deq_bits_T_address; // @[Repeater.scala:26:21]
wire [15:0] _io_deq_bits_T_mask; // @[Repeater.scala:26:21]
wire [127:0] _io_deq_bits_T_data; // @[Repeater.scala:26:21]
wire _io_deq_bits_T_corrupt; // @[Repeater.scala:26:21]
wire io_enq_ready_0; // @[Repeater.scala:10:7]
wire [2:0] io_deq_bits_opcode_0; // @[Repeater.scala:10:7]
wire [2:0] io_deq_bits_param_0; // @[Repeater.scala:10:7]
wire [3:0] io_deq_bits_size_0; // @[Repeater.scala:10:7]
wire [7:0] io_deq_bits_source_0; // @[Repeater.scala:10:7]
wire [28:0] io_deq_bits_address_0; // @[Repeater.scala:10:7]
wire [15:0] io_deq_bits_mask_0; // @[Repeater.scala:10:7]
wire [127:0] io_deq_bits_data_0; // @[Repeater.scala:10:7]
wire io_deq_bits_corrupt_0; // @[Repeater.scala:10:7]
wire io_deq_valid_0; // @[Repeater.scala:10:7]
wire io_full; // @[Repeater.scala:10:7]
reg full; // @[Repeater.scala:20:21]
assign io_full = full; // @[Repeater.scala:10:7, :20:21]
reg [2:0] saved_opcode; // @[Repeater.scala:21:18]
reg [2:0] saved_param; // @[Repeater.scala:21:18]
reg [3:0] saved_size; // @[Repeater.scala:21:18]
reg [7:0] saved_source; // @[Repeater.scala:21:18]
reg [28:0] saved_address; // @[Repeater.scala:21:18]
reg [15:0] saved_mask; // @[Repeater.scala:21:18]
reg [127:0] saved_data; // @[Repeater.scala:21:18]
reg saved_corrupt; // @[Repeater.scala:21:18]
assign _io_deq_valid_T = io_enq_valid_0 | full; // @[Repeater.scala:10:7, :20:21, :24:32]
assign io_deq_valid_0 = _io_deq_valid_T; // @[Repeater.scala:10:7, :24:32]
wire _io_enq_ready_T = ~full; // @[Repeater.scala:20:21, :25:35]
assign _io_enq_ready_T_1 = io_deq_ready_0 & _io_enq_ready_T; // @[Repeater.scala:10:7, :25:{32,35}]
assign io_enq_ready_0 = _io_enq_ready_T_1; // @[Repeater.scala:10:7, :25:32]
assign _io_deq_bits_T_opcode = full ? saved_opcode : io_enq_bits_opcode_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign _io_deq_bits_T_param = full ? saved_param : io_enq_bits_param_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign _io_deq_bits_T_size = full ? saved_size : io_enq_bits_size_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign _io_deq_bits_T_source = full ? saved_source : io_enq_bits_source_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign _io_deq_bits_T_address = full ? saved_address : io_enq_bits_address_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign _io_deq_bits_T_mask = full ? saved_mask : io_enq_bits_mask_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign _io_deq_bits_T_data = full ? saved_data : io_enq_bits_data_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign _io_deq_bits_T_corrupt = full ? saved_corrupt : io_enq_bits_corrupt_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign io_deq_bits_opcode_0 = _io_deq_bits_T_opcode; // @[Repeater.scala:10:7, :26:21]
assign io_deq_bits_param_0 = _io_deq_bits_T_param; // @[Repeater.scala:10:7, :26:21]
assign io_deq_bits_size_0 = _io_deq_bits_T_size; // @[Repeater.scala:10:7, :26:21]
assign io_deq_bits_source_0 = _io_deq_bits_T_source; // @[Repeater.scala:10:7, :26:21]
assign io_deq_bits_address_0 = _io_deq_bits_T_address; // @[Repeater.scala:10:7, :26:21]
assign io_deq_bits_mask_0 = _io_deq_bits_T_mask; // @[Repeater.scala:10:7, :26:21]
assign io_deq_bits_data_0 = _io_deq_bits_T_data; // @[Repeater.scala:10:7, :26:21]
assign io_deq_bits_corrupt_0 = _io_deq_bits_T_corrupt; // @[Repeater.scala:10:7, :26:21]
wire _T_1 = io_enq_ready_0 & io_enq_valid_0 & io_repeat_0; // @[Decoupled.scala:51:35]
always @(posedge clock) begin // @[Repeater.scala:10:7]
if (reset) // @[Repeater.scala:10:7]
full <= 1'h0; // @[Repeater.scala:20:21]
else // @[Repeater.scala:10:7]
full <= ~(io_deq_ready_0 & io_deq_valid_0 & ~io_repeat_0) & (_T_1 | full); // @[Decoupled.scala:51:35]
if (_T_1) begin // @[Decoupled.scala:51:35]
saved_opcode <= io_enq_bits_opcode_0; // @[Repeater.scala:10:7, :21:18]
saved_param <= io_enq_bits_param_0; // @[Repeater.scala:10:7, :21:18]
saved_size <= io_enq_bits_size_0; // @[Repeater.scala:10:7, :21:18]
saved_source <= io_enq_bits_source_0; // @[Repeater.scala:10:7, :21:18]
saved_address <= io_enq_bits_address_0; // @[Repeater.scala:10:7, :21:18]
saved_mask <= io_enq_bits_mask_0; // @[Repeater.scala:10:7, :21:18]
saved_data <= io_enq_bits_data_0; // @[Repeater.scala:10:7, :21:18]
saved_corrupt <= io_enq_bits_corrupt_0; // @[Repeater.scala:10:7, :21:18]
end
  end // always @(posedge)
assign io_enq_ready = io_enq_ready_0; // @[Repeater.scala:10:7]
assign io_deq_valid = io_deq_valid_0; // @[Repeater.scala:10:7]
assign io_deq_bits_opcode = io_deq_bits_opcode_0; // @[Repeater.scala:10:7]
assign io_deq_bits_param = io_deq_bits_param_0; // @[Repeater.scala:10:7]
assign io_deq_bits_size = io_deq_bits_size_0; // @[Repeater.scala:10:7]
assign io_deq_bits_source = io_deq_bits_source_0; // @[Repeater.scala:10:7]
assign io_deq_bits_address = io_deq_bits_address_0; // @[Repeater.scala:10:7]
assign io_deq_bits_mask = io_deq_bits_mask_0; // @[Repeater.scala:10:7]
assign io_deq_bits_data = io_deq_bits_data_0; // @[Repeater.scala:10:7]
assign io_deq_bits_corrupt = io_deq_bits_corrupt_0; // @[Repeater.scala:10:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Counters.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
// Produces 0-width value when counting to 1
class ZCounter(val n: Int) {
val value = RegInit(0.U(log2Ceil(n).W))
def inc(): Bool = {
if (n == 1) true.B
else {
val wrap = value === (n-1).U
value := Mux(!isPow2(n).B && wrap, 0.U, value + 1.U)
wrap
}
}
}
object ZCounter {
def apply(n: Int) = new ZCounter(n)
def apply(cond: Bool, n: Int): (UInt, Bool) = {
val c = new ZCounter(n)
var wrap: Bool = null
when (cond) { wrap = c.inc() }
(c.value, cond && wrap)
}
}
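// Usage sketch (illustrative names):
//   val (beat, done) = ZCounter(io.fire, beatsPerBurst)
// advances once per asserted `io.fire` and pulses `done` on the wrapping beat; for
// beatsPerBurst == 1 the value is 0 bits wide, as noted above.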
object TwoWayCounter {
def apply(up: Bool, down: Bool, max: Int): UInt = {
val cnt = RegInit(0.U(log2Up(max + 1).W))
when (up && !down) { cnt := cnt + 1.U }
when (down && !up) { cnt := cnt - 1.U }
cnt
}
}
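// Usage sketch (illustrative names): TwoWayCounter(allocFire, freeFire, maxInflight) tracks
// the number of outstanding transactions, counting up on allocate-only cycles, down on
// free-only cycles, and holding when both or neither fire.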
// a counter that clock gates most of its MSBs using the LSB carry-out
case class WideCounter(width: Int, inc: UInt = 1.U, reset: Boolean = true, inhibit: Bool = false.B) {
private val isWide = width > (2 * inc.getWidth)
private val smallWidth = if (isWide) inc.getWidth max log2Up(width) else width
private val small = if (reset) RegInit(0.U(smallWidth.W)) else Reg(UInt(smallWidth.W))
private val nextSmall = small +& inc
when (!inhibit) { small := nextSmall }
private val large = if (isWide) {
val r = if (reset) RegInit(0.U((width - smallWidth).W)) else Reg(UInt((width - smallWidth).W))
when (nextSmall(smallWidth) && !inhibit) { r := r + 1.U }
r
} else null
val value = if (isWide) Cat(large, small) else small
lazy val carryOut = {
val lo = (small ^ nextSmall) >> 1
if (!isWide)
lo
else {
val hi = Mux(nextSmall(smallWidth), large ^ (large +& 1.U), 0.U) >> 1
Cat(hi, lo)
}
}
def := (x: UInt) = {
small := x
if (isWide) large := x >> smallWidth
}
}
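// Usage sketch (illustrative names): a 64-bit cycle counter whose upper bits only toggle on
// the low-word carry can be written as
//   val cycles = WideCounter(64)
// and read via `cycles.value`; passing `inhibit = stall` freezes it while `stall` is high,
// matching the when(!inhibit) update above.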
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
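  // Worked examples for the Seq[Bool] operators above (evaluated by hand; index 0 is
  // treated as the LSB, consistent with asUInt):
  //   Seq(true.B, false.B, true.B) & Seq(true.B, true.B, false.B) // elementwise: true, false, false
  //   Seq(true.B) | Seq(false.B, true.B)                          // true, true: | and ^ pad the shorter operand with false
  //   Seq(true.B, false.B) << 1                                   // false, true, false: n falses prepended at the LSB end
  //   Seq[Bool]().orR                                             // false.B, while andR of an empty Seq is true.B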
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
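  // Illustrative sketch of how holdUnless and readAndHold compose (hypothetical
  // names `mem`, `raddr`, `ren`): for a one-cycle-latency SyncReadMem, readAndHold
  // passes the fresh read data through on the cycle after an enabled read and then
  // keeps returning it until the next enabled read, so downstream logic may sample
  // it more than one cycle later.
  //   val mem   = SyncReadMem(64, UInt(32.W))
  //   val rdata = mem.readAndHold(raddr, ren) // == mem.read(raddr, ren) holdUnless RegNext(ren)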
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
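  // Worked examples (evaluated by hand from the definitions above):
  //   "MyCoolDevice".underscore // "my_cool_device"
  //   "Rocket Tile".kebab       // "rocket-tile"
  //   "clock".named(Some("core")) // "clock_named_core"
  //   "clock".named(None)         // "clock_with_no_name"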
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
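  // Worked examples (evaluated by hand; literal widths written explicitly):
  //   5.U(3.W).sextTo(6)                // 0b111101: the sign bit is replicated into the new MSBs
  //   6.U(3.W).addWrap(5.U(3.W), 8)     // 3: a power-of-2 modulus just truncates the sum
  //   6.U(3.W).addWrap(5.U(3.W), 7)     // 4: otherwise the overflowed sum is corrected by -n
  //   "b00000110".U(8.W).rotateRight(2) // 0b10000001
  //   x.extract(3, 4)                   // 0.U: zero-width extracts return 0 instead of erroring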
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
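  // Worked example (5-bit input, evaluated by hand): starting from a one-hot value,
  // leftOR propagates the set bit toward the MSB and rightOR toward the LSB; the
  // optional `cap` bounds how far the fill is carried.
  //   leftOR("b00100".U(5.W))  // 0b11100
  //   rightOR("b00100".U(5.W)) // 0b00111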
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
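  // Worked example (evaluated by hand): unlike Seq.groupBy, both the key order and
  // the per-key element order follow first appearance in the input, so generated
  // hardware does not depend on hash-map iteration order.
  //   groupByIntoSeq(Seq(3, 1, 4, 1, 5))(_ % 2)
  //   // Seq((1, List(3, 1, 1, 5)), (0, List(4)))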
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File CSR.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util.{BitPat, Cat, Fill, Mux1H, PopCount, PriorityMux, RegEnable, UIntToOH, Valid, log2Ceil, log2Up}
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.devices.debug.DebugModuleKey
import freechips.rocketchip.tile._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property
import scala.collection.mutable.LinkedHashMap
import Instructions._
import CustomInstructions._
class MStatus extends Bundle {
// not truly part of mstatus, but convenient
val debug = Bool()
val cease = Bool()
val wfi = Bool()
val isa = UInt(32.W)
val dprv = UInt(PRV.SZ.W) // effective prv for data accesses
val dv = Bool() // effective v for data accesses
val prv = UInt(PRV.SZ.W)
val v = Bool()
val sd = Bool()
val zero2 = UInt(23.W)
val mpv = Bool()
val gva = Bool()
val mbe = Bool()
val sbe = Bool()
val sxl = UInt(2.W)
val uxl = UInt(2.W)
val sd_rv32 = Bool()
val zero1 = UInt(8.W)
val tsr = Bool()
val tw = Bool()
val tvm = Bool()
val mxr = Bool()
val sum = Bool()
val mprv = Bool()
val xs = UInt(2.W)
val fs = UInt(2.W)
val mpp = UInt(2.W)
val vs = UInt(2.W)
val spp = UInt(1.W)
val mpie = Bool()
val ube = Bool()
val spie = Bool()
val upie = Bool()
val mie = Bool()
val hie = Bool()
val sie = Bool()
val uie = Bool()
}
class MNStatus extends Bundle {
val mpp = UInt(2.W)
val zero3 = UInt(3.W)
val mpv = Bool()
val zero2 = UInt(3.W)
val mie = Bool()
val zero1 = UInt(3.W)
}
class HStatus extends Bundle {
val zero6 = UInt(30.W)
val vsxl = UInt(2.W)
val zero5 = UInt(9.W)
val vtsr = Bool()
val vtw = Bool()
val vtvm = Bool()
val zero3 = UInt(2.W)
val vgein = UInt(6.W)
val zero2 = UInt(2.W)
val hu = Bool()
val spvp = Bool()
val spv = Bool()
val gva = Bool()
val vsbe = Bool()
val zero1 = UInt(5.W)
}
class DCSR extends Bundle {
val xdebugver = UInt(2.W)
val zero4 = UInt(2.W)
val zero3 = UInt(12.W)
val ebreakm = Bool()
val ebreakh = Bool()
val ebreaks = Bool()
val ebreaku = Bool()
val zero2 = Bool()
val stopcycle = Bool()
val stoptime = Bool()
val cause = UInt(3.W)
val v = Bool()
val zero1 = UInt(2.W)
val step = Bool()
val prv = UInt(PRV.SZ.W)
}
class MIP(implicit p: Parameters) extends CoreBundle()(p)
with HasCoreParameters {
val lip = Vec(coreParams.nLocalInterrupts, Bool())
val zero1 = Bool()
val debug = Bool() // keep in sync with CSR.debugIntCause
val rocc = Bool()
val sgeip = Bool()
val meip = Bool()
val vseip = Bool()
val seip = Bool()
val ueip = Bool()
val mtip = Bool()
val vstip = Bool()
val stip = Bool()
val utip = Bool()
val msip = Bool()
val vssip = Bool()
val ssip = Bool()
val usip = Bool()
}
class Envcfg extends Bundle {
val stce = Bool() // only for menvcfg/henvcfg
val pbmte = Bool() // only for menvcfg/henvcfg
val zero54 = UInt(54.W)
val cbze = Bool()
val cbcfe = Bool()
val cbie = UInt(2.W)
val zero3 = UInt(3.W)
val fiom = Bool()
  def write(wdata: UInt): Unit = {
val new_envcfg = wdata.asTypeOf(new Envcfg)
fiom := new_envcfg.fiom // only FIOM is writable currently
}
}
class PTBR(implicit p: Parameters) extends CoreBundle()(p) {
def additionalPgLevels = mode.extract(log2Ceil(pgLevels-minPgLevels+1)-1, 0)
def pgLevelsToMode(i: Int) = (xLen, i) match {
case (32, 2) => 1
case (64, x) if x >= 3 && x <= 6 => x + 5
}
val (modeBits, maxASIdBits) = xLen match {
case 32 => (1, 9)
case 64 => (4, 16)
}
require(modeBits + maxASIdBits + maxPAddrBits - pgIdxBits == xLen)
val mode = UInt(modeBits.W)
val asid = UInt(maxASIdBits.W)
val ppn = UInt((maxPAddrBits - pgIdxBits).W)
}
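// Worked mapping of pgLevelsToMode above (these match the privileged-spec satp MODE
// encodings): (32, 2) -> 1 (Sv32); (64, 3) -> 8 (Sv39); (64, 4) -> 9 (Sv48);
// (64, 5) -> 10 (Sv57); (64, 6) -> 11 (Sv64).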
object PRV
{
val SZ = 2
val U = 0
val S = 1
val H = 2
val M = 3
}
object CSR
{
// commands
val SZ = 3
def X = BitPat.dontCare(SZ)
def N = 0.U(SZ.W)
def R = 2.U(SZ.W)
def I = 4.U(SZ.W)
def W = 5.U(SZ.W)
def S = 6.U(SZ.W)
def C = 7.U(SZ.W)
// mask a CSR cmd with a valid bit
def maskCmd(valid: Bool, cmd: UInt): UInt = {
// all commands less than CSR.I are treated by CSRFile as NOPs
cmd & ~Mux(valid, 0.U, CSR.I)
}
val ADDRSZ = 12
def modeLSB: Int = 8
def mode(addr: Int): Int = (addr >> modeLSB) % (1 << PRV.SZ)
def mode(addr: UInt): UInt = addr(modeLSB + PRV.SZ - 1, modeLSB)
def busErrorIntCause = 128
def debugIntCause = 14 // keep in sync with MIP.debug
def debugTriggerCause = {
val res = debugIntCause
require(!(Causes.all contains res))
res
}
def rnmiIntCause = 13 // NMI: Higher numbers = higher priority, must not reuse debugIntCause
def rnmiBEUCause = 12
val firstCtr = CSRs.cycle
val firstCtrH = CSRs.cycleh
val firstHPC = CSRs.hpmcounter3
val firstHPCH = CSRs.hpmcounter3h
val firstHPE = CSRs.mhpmevent3
val firstMHPC = CSRs.mhpmcounter3
val firstMHPCH = CSRs.mhpmcounter3h
val firstHPM = 3
val nCtr = 32
val nHPM = nCtr - firstHPM
val hpmWidth = 40
val maxPMPs = 16
}
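// Worked examples for the address helpers above (CSR addresses per the privileged
// spec): CSR.mode(0x300) == 3 (mstatus is M-mode), CSR.mode(0x100) == 1 (sstatus is
// S-mode), CSR.mode(0xC00) == 0 (cycle is user-readable). maskCmd(false.B, CSR.W)
// constant-folds to 1.U, which is below CSR.I and therefore treated by CSRFile as a NOP.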
class PerfCounterIO(implicit p: Parameters) extends CoreBundle
with HasCoreParameters {
val eventSel = Output(UInt(xLen.W))
val inc = Input(UInt(log2Ceil(1+retireWidth).W))
}
class TracedInstruction(implicit p: Parameters) extends CoreBundle {
val valid = Bool()
val iaddr = UInt(coreMaxAddrBits.W)
val insn = UInt(iLen.W)
val priv = UInt(3.W)
val exception = Bool()
val interrupt = Bool()
val cause = UInt(xLen.W)
val tval = UInt((coreMaxAddrBits max iLen).W)
val wdata = Option.when(traceHasWdata)(UInt((vLen max xLen).W))
}
class TraceAux extends Bundle {
val enable = Bool()
val stall = Bool()
}
class CSRDecodeIO(implicit p: Parameters) extends CoreBundle {
val inst = Input(UInt(iLen.W))
def csr_addr = (inst >> 20)(CSR.ADDRSZ-1, 0)
val fp_illegal = Output(Bool())
val vector_illegal = Output(Bool())
val fp_csr = Output(Bool())
val vector_csr = Output(Bool())
val rocc_illegal = Output(Bool())
val read_illegal = Output(Bool())
val write_illegal = Output(Bool())
val write_flush = Output(Bool())
val system_illegal = Output(Bool())
val virtual_access_illegal = Output(Bool())
val virtual_system_illegal = Output(Bool())
}
class CSRFileIO(hasBeu: Boolean)(implicit p: Parameters) extends CoreBundle
with HasCoreParameters {
val ungated_clock = Input(Clock())
val interrupts = Input(new CoreInterrupts(hasBeu))
val hartid = Input(UInt(hartIdLen.W))
val rw = new Bundle {
val addr = Input(UInt(CSR.ADDRSZ.W))
val cmd = Input(Bits(CSR.SZ.W))
val rdata = Output(Bits(xLen.W))
val wdata = Input(Bits(xLen.W))
}
val decode = Vec(decodeWidth, new CSRDecodeIO)
val csr_stall = Output(Bool()) // stall retire for wfi
val rw_stall = Output(Bool()) // stall rw, rw will have no effect while rw_stall
val eret = Output(Bool())
val singleStep = Output(Bool())
val status = Output(new MStatus())
val hstatus = Output(new HStatus())
val gstatus = Output(new MStatus())
val ptbr = Output(new PTBR())
val hgatp = Output(new PTBR())
val vsatp = Output(new PTBR())
val evec = Output(UInt(vaddrBitsExtended.W))
val exception = Input(Bool())
val retire = Input(UInt(log2Up(1+retireWidth).W))
val cause = Input(UInt(xLen.W))
val pc = Input(UInt(vaddrBitsExtended.W))
val tval = Input(UInt(vaddrBitsExtended.W))
val htval = Input(UInt(((maxSVAddrBits + 1) min xLen).W))
val mhtinst_read_pseudo = Input(Bool())
val gva = Input(Bool())
val time = Output(UInt(xLen.W))
val fcsr_rm = Output(Bits(FPConstants.RM_SZ.W))
val fcsr_flags = Flipped(Valid(Bits(FPConstants.FLAGS_SZ.W)))
val set_fs_dirty = coreParams.haveFSDirty.option(Input(Bool()))
val rocc_interrupt = Input(Bool())
val interrupt = Output(Bool())
val interrupt_cause = Output(UInt(xLen.W))
val bp = Output(Vec(nBreakpoints, new BP))
val pmp = Output(Vec(nPMPs, new PMP))
val counters = Vec(nPerfCounters, new PerfCounterIO)
val csrw_counter = Output(UInt(CSR.nCtr.W))
val inhibit_cycle = Output(Bool())
val inst = Input(Vec(retireWidth, UInt(iLen.W)))
val trace = Output(Vec(retireWidth, new TracedInstruction))
val mcontext = Output(UInt(coreParams.mcontextWidth.W))
val scontext = Output(UInt(coreParams.scontextWidth.W))
val fiom = Output(Bool())
val vector = usingVector.option(new Bundle {
val vconfig = Output(new VConfig())
val vstart = Output(UInt(maxVLMax.log2.W))
val vxrm = Output(UInt(2.W))
val set_vs_dirty = Input(Bool())
val set_vconfig = Flipped(Valid(new VConfig))
val set_vstart = Flipped(Valid(vstart))
val set_vxsat = Input(Bool())
})
}
class VConfig(implicit p: Parameters) extends CoreBundle {
val vl = UInt((maxVLMax.log2 + 1).W)
val vtype = new VType
}
object VType {
def fromUInt(that: UInt, ignore_vill: Boolean = false)(implicit p: Parameters): VType = {
val res = 0.U.asTypeOf(new VType)
val in = that.asTypeOf(res)
val vill = (in.max_vsew.U < in.vsew) || !in.lmul_ok || in.reserved =/= 0.U || in.vill
when (!vill || ignore_vill.B) {
res := in
res.vsew := in.vsew(log2Ceil(1 + in.max_vsew) - 1, 0)
}
res.reserved := 0.U
res.vill := vill
res
}
def computeVL(avl: UInt, vtype: UInt, currentVL: UInt, useCurrentVL: Bool, useMax: Bool, useZero: Bool)(implicit p: Parameters): UInt =
VType.fromUInt(vtype, true).vl(avl, currentVL, useCurrentVL, useMax, useZero)
}
class VType(implicit p: Parameters) extends CoreBundle {
val vill = Bool()
val reserved = UInt((xLen - 9).W)
val vma = Bool()
val vta = Bool()
val vsew = UInt(3.W)
val vlmul_sign = Bool()
val vlmul_mag = UInt(2.W)
def vlmul_signed: SInt = Cat(vlmul_sign, vlmul_mag).asSInt
@deprecated("use vlmul_sign, vlmul_mag, or vlmul_signed", "RVV 0.9")
def vlmul: UInt = vlmul_mag
def max_vsew = log2Ceil(eLen/8)
def max_vlmul = (1 << vlmul_mag.getWidth) - 1
def lmul_ok: Bool = Mux(this.vlmul_sign, this.vlmul_mag =/= 0.U && ~this.vlmul_mag < max_vsew.U - this.vsew, true.B)
def minVLMax: Int = ((maxVLMax / eLen) >> ((1 << vlmul_mag.getWidth) - 1)) max 1
def vlMax: UInt = (maxVLMax.U >> (this.vsew +& Cat(this.vlmul_sign, ~this.vlmul_mag))).andNot((minVLMax-1).U)
def vl(avl: UInt, currentVL: UInt, useCurrentVL: Bool, useMax: Bool, useZero: Bool): UInt = {
val atLeastMaxVLMax = useMax || Mux(useCurrentVL, currentVL >= maxVLMax.U, avl >= maxVLMax.U)
val avl_lsbs = Mux(useCurrentVL, currentVL, avl)(maxVLMax.log2 - 1, 0)
val atLeastVLMax = atLeastMaxVLMax || (avl_lsbs & (-maxVLMax.S >> (this.vsew +& Cat(this.vlmul_sign, ~this.vlmul_mag))).asUInt.andNot((minVLMax-1).U)).orR
val isZero = vill || useZero
Mux(!isZero && atLeastVLMax, vlMax, 0.U) | Mux(!isZero && !atLeastVLMax, avl_lsbs, 0.U)
}
}
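// Worked examples for the LMUL encoding above, assuming the standard RVV
// interpretation LMUL = 2^vlmul_signed: (vlmul_sign, vlmul_mag) = (0, 0) -> LMUL 1,
// (0, 2) -> LMUL 4, (1, 3) -> vlmul_signed = -1 -> LMUL 1/2, (1, 1) -> -3 -> LMUL 1/8.
// lmul_ok additionally rejects the reserved fractional encoding (1, 0) and fractions
// too small for the current vsew.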
class CSRFile(
perfEventSets: EventSets = new EventSets(Seq()),
customCSRs: Seq[CustomCSR] = Nil,
roccCSRs: Seq[CustomCSR] = Nil,
hasBeu: Boolean = false)(implicit p: Parameters)
extends CoreModule()(p)
with HasCoreParameters {
val io = IO(new CSRFileIO(hasBeu) {
val customCSRs = Vec(CSRFile.this.customCSRs.size, new CustomCSRIO)
val roccCSRs = Vec(CSRFile.this.roccCSRs.size, new CustomCSRIO)
})
io.rw_stall := false.B
val reset_mstatus = WireDefault(0.U.asTypeOf(new MStatus()))
reset_mstatus.mpp := PRV.M.U
reset_mstatus.prv := PRV.M.U
reset_mstatus.xs := (if (usingRoCC) 3.U else 0.U)
val reg_mstatus = RegInit(reset_mstatus)
val new_prv = WireDefault(reg_mstatus.prv)
reg_mstatus.prv := legalizePrivilege(new_prv)
val reset_dcsr = WireDefault(0.U.asTypeOf(new DCSR()))
reset_dcsr.xdebugver := 1.U
reset_dcsr.prv := PRV.M.U
val reg_dcsr = RegInit(reset_dcsr)
val (supported_interrupts, delegable_interrupts) = {
val sup = Wire(new MIP)
sup.usip := false.B
sup.ssip := usingSupervisor.B
sup.vssip := usingHypervisor.B
sup.msip := true.B
sup.utip := false.B
sup.stip := usingSupervisor.B
sup.vstip := usingHypervisor.B
sup.mtip := true.B
sup.ueip := false.B
sup.seip := usingSupervisor.B
sup.vseip := usingHypervisor.B
sup.meip := true.B
sup.sgeip := false.B
sup.rocc := usingRoCC.B
sup.debug := false.B
sup.zero1 := false.B
sup.lip foreach { _ := true.B }
val supported_high_interrupts = if (io.interrupts.buserror.nonEmpty && !usingNMI) (BigInt(1) << CSR.busErrorIntCause).U else 0.U
val del = WireDefault(sup)
del.msip := false.B
del.mtip := false.B
del.meip := false.B
(sup.asUInt | supported_high_interrupts, del.asUInt)
}
val delegable_base_exceptions = Seq(
Causes.misaligned_fetch,
Causes.fetch_page_fault,
Causes.breakpoint,
Causes.load_page_fault,
Causes.store_page_fault,
Causes.misaligned_load,
Causes.misaligned_store,
Causes.illegal_instruction,
Causes.user_ecall,
)
val delegable_hypervisor_exceptions = Seq(
Causes.virtual_supervisor_ecall,
Causes.fetch_guest_page_fault,
Causes.load_guest_page_fault,
Causes.virtual_instruction,
Causes.store_guest_page_fault,
)
val delegable_exceptions = (
delegable_base_exceptions
++ (if (usingHypervisor) delegable_hypervisor_exceptions else Seq())
).map(1 << _).sum.U
val hs_delegable_exceptions = Seq(
Causes.misaligned_fetch,
Causes.fetch_access,
Causes.illegal_instruction,
Causes.breakpoint,
Causes.misaligned_load,
Causes.load_access,
Causes.misaligned_store,
Causes.store_access,
Causes.user_ecall,
Causes.fetch_page_fault,
Causes.load_page_fault,
Causes.store_page_fault).map(1 << _).sum.U
val (hs_delegable_interrupts, mideleg_always_hs) = {
val always = WireDefault(0.U.asTypeOf(new MIP()))
always.vssip := usingHypervisor.B
always.vstip := usingHypervisor.B
always.vseip := usingHypervisor.B
val deleg = WireDefault(always)
deleg.lip.foreach { _ := usingHypervisor.B }
(deleg.asUInt, always.asUInt)
}
val reg_debug = RegInit(false.B)
val reg_dpc = Reg(UInt(vaddrBitsExtended.W))
val reg_dscratch0 = Reg(UInt(xLen.W))
val reg_dscratch1 = (p(DebugModuleKey).map(_.nDscratch).getOrElse(1) > 1).option(Reg(UInt(xLen.W)))
val reg_singleStepped = Reg(Bool())
val reg_mcontext = (coreParams.mcontextWidth > 0).option(RegInit(0.U(coreParams.mcontextWidth.W)))
val reg_scontext = (coreParams.scontextWidth > 0).option(RegInit(0.U(coreParams.scontextWidth.W)))
val reg_tselect = Reg(UInt(log2Up(nBreakpoints).W))
val reg_bp = Reg(Vec(1 << log2Up(nBreakpoints), new BP))
val reg_pmp = Reg(Vec(nPMPs, new PMPReg))
val reg_mie = Reg(UInt(xLen.W))
val (reg_mideleg, read_mideleg) = {
val reg = Reg(UInt(xLen.W))
(reg, Mux(usingSupervisor.B, reg & delegable_interrupts | mideleg_always_hs, 0.U))
}
val (reg_medeleg, read_medeleg) = {
val reg = Reg(UInt(xLen.W))
(reg, Mux(usingSupervisor.B, reg & delegable_exceptions, 0.U))
}
val reg_mip = Reg(new MIP)
val reg_mepc = Reg(UInt(vaddrBitsExtended.W))
val reg_mcause = RegInit(0.U(xLen.W))
val reg_mtval = Reg(UInt(vaddrBitsExtended.W))
val reg_mtval2 = Reg(UInt(((maxSVAddrBits + 1) min xLen).W))
val reg_mscratch = Reg(Bits(xLen.W))
val mtvecWidth = paddrBits min xLen
val reg_mtvec = mtvecInit match {
case Some(addr) => RegInit(addr.U(mtvecWidth.W))
case None => Reg(UInt(mtvecWidth.W))
}
val reset_mnstatus = WireDefault(0.U.asTypeOf(new MNStatus()))
reset_mnstatus.mpp := PRV.M.U
val reg_mnscratch = Reg(Bits(xLen.W))
val reg_mnepc = Reg(UInt(vaddrBitsExtended.W))
val reg_mncause = RegInit(0.U(xLen.W))
val reg_mnstatus = RegInit(reset_mnstatus)
val reg_rnmie = RegInit(true.B)
val nmie = reg_rnmie
val reg_menvcfg = RegInit(0.U.asTypeOf(new Envcfg))
val reg_senvcfg = RegInit(0.U.asTypeOf(new Envcfg))
val reg_henvcfg = RegInit(0.U.asTypeOf(new Envcfg))
val delegable_counters = ((BigInt(1) << (nPerfCounters + CSR.firstHPM)) - 1).U
val (reg_mcounteren, read_mcounteren) = {
val reg = Reg(UInt(32.W))
(reg, Mux(usingUser.B, reg & delegable_counters, 0.U))
}
val (reg_scounteren, read_scounteren) = {
val reg = Reg(UInt(32.W))
(reg, Mux(usingSupervisor.B, reg & delegable_counters, 0.U))
}
val (reg_hideleg, read_hideleg) = {
val reg = Reg(UInt(xLen.W))
(reg, Mux(usingHypervisor.B, reg & hs_delegable_interrupts, 0.U))
}
val (reg_hedeleg, read_hedeleg) = {
val reg = Reg(UInt(xLen.W))
(reg, Mux(usingHypervisor.B, reg & hs_delegable_exceptions, 0.U))
}
val hs_delegable_counters = delegable_counters
val (reg_hcounteren, read_hcounteren) = {
val reg = Reg(UInt(32.W))
(reg, Mux(usingHypervisor.B, reg & hs_delegable_counters, 0.U))
}
val reg_hstatus = RegInit(0.U.asTypeOf(new HStatus))
val reg_hgatp = Reg(new PTBR)
val reg_htval = Reg(reg_mtval2.cloneType)
val read_hvip = reg_mip.asUInt & hs_delegable_interrupts
val read_hie = reg_mie & hs_delegable_interrupts
val (reg_vstvec, read_vstvec) = {
val reg = Reg(UInt(vaddrBitsExtended.W))
(reg, formTVec(reg).sextTo(xLen))
}
val reg_vsstatus = Reg(new MStatus)
val reg_vsscratch = Reg(Bits(xLen.W))
val reg_vsepc = Reg(UInt(vaddrBitsExtended.W))
val reg_vscause = Reg(Bits(xLen.W))
val reg_vstval = Reg(UInt(vaddrBitsExtended.W))
val reg_vsatp = Reg(new PTBR)
val reg_sepc = Reg(UInt(vaddrBitsExtended.W))
val reg_scause = Reg(Bits(xLen.W))
val reg_stval = Reg(UInt(vaddrBitsExtended.W))
val reg_sscratch = Reg(Bits(xLen.W))
val reg_stvec = Reg(UInt((if (usingHypervisor) vaddrBitsExtended else vaddrBits).W))
val reg_satp = Reg(new PTBR)
val reg_wfi = withClock(io.ungated_clock) { RegInit(false.B) }
val reg_fflags = Reg(UInt(5.W))
val reg_frm = Reg(UInt(3.W))
val reg_vconfig = usingVector.option(Reg(new VConfig))
val reg_vstart = usingVector.option(Reg(UInt(maxVLMax.log2.W)))
val reg_vxsat = usingVector.option(Reg(Bool()))
val reg_vxrm = usingVector.option(Reg(UInt(io.vector.get.vxrm.getWidth.W)))
val reg_mtinst_read_pseudo = Reg(Bool())
val reg_htinst_read_pseudo = Reg(Bool())
// XLEN=32: 0x00002000
// XLEN=64: 0x00003000
val Seq(read_mtinst, read_htinst) = Seq(reg_mtinst_read_pseudo, reg_htinst_read_pseudo).map(r => Cat(r, (xLen == 32).option(0.U).getOrElse(r), 0.U(12.W)))
val reg_mcountinhibit = RegInit(0.U((CSR.firstHPM + nPerfCounters).W))
io.inhibit_cycle := reg_mcountinhibit(0)
val reg_instret = WideCounter(64, io.retire, inhibit = reg_mcountinhibit(2))
val reg_cycle = if (enableCommitLog) WideCounter(64, io.retire, inhibit = reg_mcountinhibit(0))
else withClock(io.ungated_clock) { WideCounter(64, !io.csr_stall, inhibit = reg_mcountinhibit(0)) }
val reg_hpmevent = io.counters.map(c => RegInit(0.U(xLen.W)))
(io.counters zip reg_hpmevent) foreach { case (c, e) => c.eventSel := e }
val reg_hpmcounter = io.counters.zipWithIndex.map { case (c, i) =>
WideCounter(CSR.hpmWidth, c.inc, reset = false, inhibit = reg_mcountinhibit(CSR.firstHPM+i)) }
val mip = WireDefault(reg_mip)
mip.lip := (io.interrupts.lip: Seq[Bool])
mip.mtip := io.interrupts.mtip
mip.msip := io.interrupts.msip
mip.meip := io.interrupts.meip
// seip is the OR of reg_mip.seip and the actual line from the PLIC
io.interrupts.seip.foreach { mip.seip := reg_mip.seip || _ }
  // Similar sort of thing would apply if the PLIC had a VSEIP line:
//io.interrupts.vseip.foreach { mip.vseip := reg_mip.vseip || _ }
mip.rocc := io.rocc_interrupt
val read_mip = mip.asUInt & supported_interrupts
val read_hip = read_mip & hs_delegable_interrupts
val high_interrupts = (if (usingNMI) 0.U else io.interrupts.buserror.map(_ << CSR.busErrorIntCause).getOrElse(0.U))
val pending_interrupts = high_interrupts | (read_mip & reg_mie)
val d_interrupts = io.interrupts.debug << CSR.debugIntCause
val (nmi_interrupts, nmiFlag) = io.interrupts.nmi.map(nmi =>
(((nmi.rnmi && reg_rnmie) << CSR.rnmiIntCause) |
io.interrupts.buserror.map(_ << CSR.rnmiBEUCause).getOrElse(0.U),
!io.interrupts.debug && nmi.rnmi && reg_rnmie)).getOrElse(0.U, false.B)
val m_interrupts = Mux(nmie && (reg_mstatus.prv <= PRV.S.U || reg_mstatus.mie), ~(~pending_interrupts | read_mideleg), 0.U)
val s_interrupts = Mux(nmie && (reg_mstatus.v || reg_mstatus.prv < PRV.S.U || (reg_mstatus.prv === PRV.S.U && reg_mstatus.sie)), pending_interrupts & read_mideleg & ~read_hideleg, 0.U)
val vs_interrupts = Mux(nmie && (reg_mstatus.v && (reg_mstatus.prv < PRV.S.U || reg_mstatus.prv === PRV.S.U && reg_vsstatus.sie)), pending_interrupts & read_hideleg, 0.U)
val (anyInterrupt, whichInterrupt) = chooseInterrupt(Seq(vs_interrupts, s_interrupts, m_interrupts, nmi_interrupts, d_interrupts))
val interruptMSB = BigInt(1) << (xLen-1)
val interruptCause = interruptMSB.U + (nmiFlag << (xLen-2)) + whichInterrupt
io.interrupt := (anyInterrupt && !io.singleStep || reg_singleStepped) && !(reg_debug || io.status.cease)
io.interrupt_cause := interruptCause
io.bp := reg_bp take nBreakpoints
io.mcontext := reg_mcontext.getOrElse(0.U)
io.scontext := reg_scontext.getOrElse(0.U)
io.fiom := (reg_mstatus.prv < PRV.M.U && reg_menvcfg.fiom) || (reg_mstatus.prv < PRV.S.U && reg_senvcfg.fiom) || (reg_mstatus.v && reg_henvcfg.fiom)
io.pmp := reg_pmp.map(PMP(_))
val isaMaskString =
(if (usingMulDiv) "M" else "") +
(if (usingAtomics) "A" else "") +
(if (fLen >= 32) "F" else "") +
(if (fLen >= 64) "D" else "") +
(if (coreParams.hasV) "V" else "") +
(if (usingCompressed) "C" else "")
val isaString = (if (coreParams.useRVE) "E" else "I") +
isaMaskString +
(if (customIsaExt.isDefined || usingRoCC) "X" else "") +
(if (usingSupervisor) "S" else "") +
(if (usingHypervisor) "H" else "") +
(if (usingUser) "U" else "")
val isaMax = (BigInt(log2Ceil(xLen) - 4) << (xLen-2)) | isaStringToMask(isaString)
val reg_misa = RegInit(isaMax.U)
val read_mstatus = io.status.asUInt.extract(xLen-1,0)
val read_mtvec = formTVec(reg_mtvec).padTo(xLen)
val read_stvec = formTVec(reg_stvec).sextTo(xLen)
val read_mapping = LinkedHashMap[Int,Bits](
CSRs.tselect -> reg_tselect,
CSRs.tdata1 -> reg_bp(reg_tselect).control.asUInt,
CSRs.tdata2 -> reg_bp(reg_tselect).address.sextTo(xLen),
CSRs.tdata3 -> reg_bp(reg_tselect).textra.asUInt,
CSRs.misa -> reg_misa,
CSRs.mstatus -> read_mstatus,
CSRs.mtvec -> read_mtvec,
CSRs.mip -> read_mip,
CSRs.mie -> reg_mie,
CSRs.mscratch -> reg_mscratch,
CSRs.mepc -> readEPC(reg_mepc).sextTo(xLen),
CSRs.mtval -> reg_mtval.sextTo(xLen),
CSRs.mcause -> reg_mcause,
CSRs.mhartid -> io.hartid)
val debug_csrs = if (!usingDebug) LinkedHashMap() else LinkedHashMap[Int,Bits](
CSRs.dcsr -> reg_dcsr.asUInt,
CSRs.dpc -> readEPC(reg_dpc).sextTo(xLen),
CSRs.dscratch0 -> reg_dscratch0.asUInt) ++
reg_dscratch1.map(r => CSRs.dscratch1 -> r)
val read_mnstatus = WireInit(0.U.asTypeOf(new MNStatus()))
read_mnstatus.mpp := reg_mnstatus.mpp
read_mnstatus.mpv := reg_mnstatus.mpv
read_mnstatus.mie := reg_rnmie
val nmi_csrs = if (!usingNMI) LinkedHashMap() else LinkedHashMap[Int,Bits](
CustomCSRs.mnscratch -> reg_mnscratch,
CustomCSRs.mnepc -> readEPC(reg_mnepc).sextTo(xLen),
CustomCSRs.mncause -> reg_mncause,
CustomCSRs.mnstatus -> read_mnstatus.asUInt)
val context_csrs = LinkedHashMap[Int,Bits]() ++
reg_mcontext.map(r => CSRs.mcontext -> r) ++
reg_scontext.map(r => CSRs.scontext -> r)
val read_fcsr = Cat(reg_frm, reg_fflags)
val fp_csrs = LinkedHashMap[Int,Bits]() ++
usingFPU.option(CSRs.fflags -> reg_fflags) ++
usingFPU.option(CSRs.frm -> reg_frm) ++
(usingFPU || usingVector).option(CSRs.fcsr -> read_fcsr)
val read_vcsr = Cat(reg_vxrm.getOrElse(0.U), reg_vxsat.getOrElse(0.U))
val vector_csrs = if (!usingVector) LinkedHashMap() else LinkedHashMap[Int,Bits](
CSRs.vxsat -> reg_vxsat.get,
CSRs.vxrm -> reg_vxrm.get,
CSRs.vcsr -> read_vcsr,
CSRs.vstart -> reg_vstart.get,
CSRs.vtype -> reg_vconfig.get.vtype.asUInt,
CSRs.vl -> reg_vconfig.get.vl,
CSRs.vlenb -> (vLen / 8).U)
read_mapping ++= debug_csrs
read_mapping ++= nmi_csrs
read_mapping ++= context_csrs
read_mapping ++= fp_csrs
read_mapping ++= vector_csrs
if (coreParams.haveBasicCounters) {
read_mapping += CSRs.mcountinhibit -> reg_mcountinhibit
read_mapping += CSRs.mcycle -> reg_cycle
read_mapping += CSRs.minstret -> reg_instret
for (((e, c), i) <- (reg_hpmevent.padTo(CSR.nHPM, 0.U)
zip reg_hpmcounter.map(x => x: UInt).padTo(CSR.nHPM, 0.U)).zipWithIndex) {
read_mapping += (i + CSR.firstHPE) -> e // mhpmeventN
read_mapping += (i + CSR.firstMHPC) -> c // mhpmcounterN
read_mapping += (i + CSR.firstHPC) -> c // hpmcounterN
if (xLen == 32) {
read_mapping += (i + CSR.firstMHPCH) -> (c >> 32) // mhpmcounterNh
read_mapping += (i + CSR.firstHPCH) -> (c >> 32) // hpmcounterNh
}
}
if (usingUser) {
read_mapping += CSRs.mcounteren -> read_mcounteren
}
read_mapping += CSRs.cycle -> reg_cycle
read_mapping += CSRs.instret -> reg_instret
if (xLen == 32) {
read_mapping += CSRs.mcycleh -> (reg_cycle >> 32)
read_mapping += CSRs.minstreth -> (reg_instret >> 32)
read_mapping += CSRs.cycleh -> (reg_cycle >> 32)
read_mapping += CSRs.instreth -> (reg_instret >> 32)
}
}
if (usingUser) {
read_mapping += CSRs.menvcfg -> reg_menvcfg.asUInt
if (xLen == 32)
read_mapping += CSRs.menvcfgh -> (reg_menvcfg.asUInt >> 32)
}
val sie_mask = {
val sgeip_mask = WireInit(0.U.asTypeOf(new MIP))
sgeip_mask.sgeip := true.B
read_mideleg & ~(hs_delegable_interrupts | sgeip_mask.asUInt)
}
if (usingSupervisor) {
val read_sie = reg_mie & sie_mask
val read_sip = read_mip & sie_mask
val read_sstatus = WireDefault(0.U.asTypeOf(new MStatus))
read_sstatus.sd := io.status.sd
read_sstatus.uxl := io.status.uxl
read_sstatus.sd_rv32 := io.status.sd_rv32
read_sstatus.mxr := io.status.mxr
read_sstatus.sum := io.status.sum
read_sstatus.xs := io.status.xs
read_sstatus.fs := io.status.fs
read_sstatus.vs := io.status.vs
read_sstatus.spp := io.status.spp
read_sstatus.spie := io.status.spie
read_sstatus.sie := io.status.sie
read_mapping += CSRs.sstatus -> (read_sstatus.asUInt)(xLen-1,0)
read_mapping += CSRs.sip -> read_sip.asUInt
read_mapping += CSRs.sie -> read_sie.asUInt
read_mapping += CSRs.sscratch -> reg_sscratch
read_mapping += CSRs.scause -> reg_scause
read_mapping += CSRs.stval -> reg_stval.sextTo(xLen)
read_mapping += CSRs.satp -> reg_satp.asUInt
read_mapping += CSRs.sepc -> readEPC(reg_sepc).sextTo(xLen)
read_mapping += CSRs.stvec -> read_stvec
read_mapping += CSRs.scounteren -> read_scounteren
read_mapping += CSRs.mideleg -> read_mideleg
read_mapping += CSRs.medeleg -> read_medeleg
read_mapping += CSRs.senvcfg -> reg_senvcfg.asUInt
}
val pmpCfgPerCSR = xLen / new PMPConfig().getWidth
def pmpCfgIndex(i: Int) = (xLen / 32) * (i / pmpCfgPerCSR)
if (reg_pmp.nonEmpty) {
require(reg_pmp.size <= CSR.maxPMPs)
val read_pmp = reg_pmp.padTo(CSR.maxPMPs, 0.U.asTypeOf(new PMP))
for (i <- 0 until read_pmp.size by pmpCfgPerCSR)
read_mapping += (CSRs.pmpcfg0 + pmpCfgIndex(i)) -> read_pmp.map(_.cfg).slice(i, i + pmpCfgPerCSR).asUInt
for ((pmp, i) <- read_pmp.zipWithIndex)
read_mapping += (CSRs.pmpaddr0 + i) -> pmp.readAddr
}
// implementation-defined CSRs
def generateCustomCSR(csr: CustomCSR, csr_io: CustomCSRIO) = {
require(csr.mask >= 0 && csr.mask.bitLength <= xLen)
require(!read_mapping.contains(csr.id))
val reg = csr.init.map(init => RegInit(init.U(xLen.W))).getOrElse(Reg(UInt(xLen.W)))
val read = io.rw.cmd =/= CSR.N && io.rw.addr === csr.id.U
csr_io.ren := read
when (read && csr_io.stall) { io.rw_stall := true.B }
read_mapping += csr.id -> reg
reg
}
val reg_custom = customCSRs.zip(io.customCSRs).map(t => generateCustomCSR(t._1, t._2))
val reg_rocc = roccCSRs.zip(io.roccCSRs).map(t => generateCustomCSR(t._1, t._2))
if (usingHypervisor) {
read_mapping += CSRs.mtinst -> read_mtinst
read_mapping += CSRs.mtval2 -> reg_mtval2
val read_hstatus = io.hstatus.asUInt.extract(xLen-1,0)
read_mapping += CSRs.hstatus -> read_hstatus
read_mapping += CSRs.hedeleg -> read_hedeleg
read_mapping += CSRs.hideleg -> read_hideleg
read_mapping += CSRs.hcounteren-> read_hcounteren
read_mapping += CSRs.hgatp -> reg_hgatp.asUInt
read_mapping += CSRs.hip -> read_hip
read_mapping += CSRs.hie -> read_hie
read_mapping += CSRs.hvip -> read_hvip
read_mapping += CSRs.hgeie -> 0.U
read_mapping += CSRs.hgeip -> 0.U
read_mapping += CSRs.htval -> reg_htval
read_mapping += CSRs.htinst -> read_htinst
read_mapping += CSRs.henvcfg -> reg_henvcfg.asUInt
if (xLen == 32)
read_mapping += CSRs.henvcfgh -> (reg_henvcfg.asUInt >> 32)
val read_vsie = (read_hie & read_hideleg) >> 1
val read_vsip = (read_hip & read_hideleg) >> 1
val read_vsepc = readEPC(reg_vsepc).sextTo(xLen)
val read_vstval = reg_vstval.sextTo(xLen)
val read_vsstatus = io.gstatus.asUInt.extract(xLen-1,0)
read_mapping += CSRs.vsstatus -> read_vsstatus
read_mapping += CSRs.vsip -> read_vsip
read_mapping += CSRs.vsie -> read_vsie
read_mapping += CSRs.vsscratch -> reg_vsscratch
read_mapping += CSRs.vscause -> reg_vscause
read_mapping += CSRs.vstval -> read_vstval
read_mapping += CSRs.vsatp -> reg_vsatp.asUInt
read_mapping += CSRs.vsepc -> read_vsepc
read_mapping += CSRs.vstvec -> read_vstvec
}
// mimpid, marchid, mvendorid, and mconfigptr are 0 unless overridden by customCSRs
Seq(CSRs.mimpid, CSRs.marchid, CSRs.mvendorid, CSRs.mconfigptr).foreach(id => read_mapping.getOrElseUpdate(id, 0.U))
val decoded_addr = {
val addr = Cat(io.status.v, io.rw.addr)
val pats = for (((k, _), i) <- read_mapping.zipWithIndex)
yield (BitPat(k.U), (0 until read_mapping.size).map(j => BitPat((i == j).B)))
val decoded = DecodeLogic(addr, Seq.fill(read_mapping.size)(X), pats)
val unvirtualized_mapping = (for (((k, _), v) <- read_mapping zip decoded) yield k -> v.asBool).toMap
for ((k, v) <- unvirtualized_mapping) yield k -> {
val alt: Option[Bool] = CSR.mode(k) match {
// hcontext was assigned an unfortunate address; it lives where a
// hypothetical vscontext will live. Exclude them from the S/VS remapping.
        // (on separate lines so scala-lint doesn't do something stupid)
case _ if k == CSRs.scontext => None
case _ if k == CSRs.hcontext => None
// When V=1, if a corresponding VS CSR exists, access it instead...
case PRV.H => unvirtualized_mapping.lift(k - (1 << CSR.modeLSB))
// ...and don't access the original S-mode version.
case PRV.S => unvirtualized_mapping.contains(k + (1 << CSR.modeLSB)).option(false.B)
case _ => None
}
alt.map(Mux(reg_mstatus.v, _, v)).getOrElse(v)
}
}
val wdata = readModifyWriteCSR(io.rw.cmd, io.rw.rdata, io.rw.wdata)
val system_insn = io.rw.cmd === CSR.I
val hlsv = Seq(HLV_B, HLV_BU, HLV_H, HLV_HU, HLV_W, HLV_WU, HLV_D, HSV_B, HSV_H, HSV_W, HSV_D, HLVX_HU, HLVX_WU)
val decode_table = Seq( ECALL-> List(Y,N,N,N,N,N,N,N,N),
EBREAK-> List(N,Y,N,N,N,N,N,N,N),
MRET-> List(N,N,Y,N,N,N,N,N,N),
CEASE-> List(N,N,N,Y,N,N,N,N,N),
WFI-> List(N,N,N,N,Y,N,N,N,N)) ++
usingDebug.option( DRET-> List(N,N,Y,N,N,N,N,N,N)) ++
usingNMI.option( MNRET-> List(N,N,Y,N,N,N,N,N,N)) ++
coreParams.haveCFlush.option(CFLUSH_D_L1-> List(N,N,N,N,N,N,N,N,N)) ++
usingSupervisor.option( SRET-> List(N,N,Y,N,N,N,N,N,N)) ++
usingVM.option( SFENCE_VMA-> List(N,N,N,N,N,Y,N,N,N)) ++
usingHypervisor.option( HFENCE_VVMA-> List(N,N,N,N,N,N,Y,N,N)) ++
usingHypervisor.option( HFENCE_GVMA-> List(N,N,N,N,N,N,N,Y,N)) ++
(if (usingHypervisor) hlsv.map(_-> List(N,N,N,N,N,N,N,N,Y)) else Seq())
val insn_call :: insn_break :: insn_ret :: insn_cease :: insn_wfi :: _ :: _ :: _ :: _ :: Nil = {
val insn = ECALL.value.U | (io.rw.addr << 20)
DecodeLogic(insn, decode_table(0)._2.map(x=>X), decode_table).map(system_insn && _.asBool)
}
for (io_dec <- io.decode) {
val addr = io_dec.inst(31, 20)
def decodeAny(m: LinkedHashMap[Int,Bits]): Bool = m.map { case(k: Int, _: Bits) => addr === k.U }.reduce(_||_)
def decodeFast(s: Seq[Int]): Bool = DecodeLogic(addr, s.map(_.U), (read_mapping -- s).keys.toList.map(_.U))
val _ :: is_break :: is_ret :: _ :: is_wfi :: is_sfence :: is_hfence_vvma :: is_hfence_gvma :: is_hlsv :: Nil =
DecodeLogic(io_dec.inst, decode_table(0)._2.map(x=>X), decode_table).map(_.asBool)
val is_counter = (addr.inRange(CSR.firstCtr.U, (CSR.firstCtr + CSR.nCtr).U) || addr.inRange(CSR.firstCtrH.U, (CSR.firstCtrH + CSR.nCtr).U))
val allow_wfi = (!usingSupervisor).B || reg_mstatus.prv > PRV.S.U || !reg_mstatus.tw && (!reg_mstatus.v || !reg_hstatus.vtw)
val allow_sfence_vma = (!usingVM).B || reg_mstatus.prv > PRV.S.U || !Mux(reg_mstatus.v, reg_hstatus.vtvm, reg_mstatus.tvm)
val allow_hfence_vvma = (!usingHypervisor).B || !reg_mstatus.v && (reg_mstatus.prv >= PRV.S.U)
val allow_hlsv = (!usingHypervisor).B || !reg_mstatus.v && (reg_mstatus.prv >= PRV.S.U || reg_hstatus.hu)
val allow_sret = (!usingSupervisor).B || reg_mstatus.prv > PRV.S.U || !Mux(reg_mstatus.v, reg_hstatus.vtsr, reg_mstatus.tsr)
val counter_addr = addr(log2Ceil(read_mcounteren.getWidth)-1, 0)
val allow_counter = (reg_mstatus.prv > PRV.S.U || read_mcounteren(counter_addr)) &&
(!usingSupervisor.B || reg_mstatus.prv >= PRV.S.U || read_scounteren(counter_addr)) &&
(!usingHypervisor.B || !reg_mstatus.v || read_hcounteren(counter_addr))
io_dec.fp_illegal := io.status.fs === 0.U || reg_mstatus.v && reg_vsstatus.fs === 0.U || !reg_misa('f'-'a')
io_dec.vector_illegal := io.status.vs === 0.U || reg_mstatus.v && reg_vsstatus.vs === 0.U || !reg_misa('v'-'a')
io_dec.fp_csr := decodeFast(fp_csrs.keys.toList)
io_dec.vector_csr := decodeFast(vector_csrs.keys.toList)
io_dec.rocc_illegal := io.status.xs === 0.U || reg_mstatus.v && reg_vsstatus.xs === 0.U || !reg_misa('x'-'a')
val csr_addr_legal = reg_mstatus.prv >= CSR.mode(addr) ||
usingHypervisor.B && !reg_mstatus.v && reg_mstatus.prv === PRV.S.U && CSR.mode(addr) === PRV.H.U
val csr_exists = decodeAny(read_mapping)
io_dec.read_illegal := !csr_addr_legal ||
!csr_exists ||
((addr === CSRs.satp.U || addr === CSRs.hgatp.U) && !allow_sfence_vma) ||
is_counter && !allow_counter ||
decodeFast(debug_csrs.keys.toList) && !reg_debug ||
decodeFast(vector_csrs.keys.toList) && io_dec.vector_illegal ||
io_dec.fp_csr && io_dec.fp_illegal
io_dec.write_illegal := addr(11,10).andR
io_dec.write_flush := {
val addr_m = addr | (PRV.M.U << CSR.modeLSB)
!(addr_m >= CSRs.mscratch.U && addr_m <= CSRs.mtval.U)
}
io_dec.system_illegal := !csr_addr_legal && !is_hlsv ||
is_wfi && !allow_wfi ||
is_ret && !allow_sret ||
is_ret && addr(10) && addr(7) && !reg_debug ||
(is_sfence || is_hfence_gvma) && !allow_sfence_vma ||
is_hfence_vvma && !allow_hfence_vvma ||
is_hlsv && !allow_hlsv
io_dec.virtual_access_illegal := reg_mstatus.v && csr_exists && (
CSR.mode(addr) === PRV.H.U ||
is_counter && read_mcounteren(counter_addr) && (!read_hcounteren(counter_addr) || !reg_mstatus.prv(0) && !read_scounteren(counter_addr)) ||
CSR.mode(addr) === PRV.S.U && !reg_mstatus.prv(0) ||
addr === CSRs.satp.U && reg_mstatus.prv(0) && reg_hstatus.vtvm)
io_dec.virtual_system_illegal := reg_mstatus.v && (
is_hfence_vvma ||
is_hfence_gvma ||
is_hlsv ||
is_wfi && (!reg_mstatus.prv(0) || !reg_mstatus.tw && reg_hstatus.vtw) ||
is_ret && CSR.mode(addr) === PRV.S.U && (!reg_mstatus.prv(0) || reg_hstatus.vtsr) ||
is_sfence && (!reg_mstatus.prv(0) || reg_hstatus.vtvm))
}
val cause =
Mux(insn_call, Causes.user_ecall.U + Mux(reg_mstatus.prv(0) && reg_mstatus.v, PRV.H.U, reg_mstatus.prv),
Mux[UInt](insn_break, Causes.breakpoint.U, io.cause))
val cause_lsbs = cause(log2Ceil(1 + CSR.busErrorIntCause)-1, 0)
val cause_deleg_lsbs = cause(log2Ceil(xLen)-1,0)
val causeIsDebugInt = cause(xLen-1) && cause_lsbs === CSR.debugIntCause.U
val causeIsDebugTrigger = !cause(xLen-1) && cause_lsbs === CSR.debugTriggerCause.U
val causeIsDebugBreak = !cause(xLen-1) && insn_break && Cat(reg_dcsr.ebreakm, reg_dcsr.ebreakh, reg_dcsr.ebreaks, reg_dcsr.ebreaku)(reg_mstatus.prv)
val trapToDebug = usingDebug.B && (reg_singleStepped || causeIsDebugInt || causeIsDebugTrigger || causeIsDebugBreak || reg_debug)
val debugEntry = p(DebugModuleKey).map(_.debugEntry).getOrElse(BigInt(0x800))
val debugException = p(DebugModuleKey).map(_.debugException).getOrElse(BigInt(0x808))
val debugTVec = Mux(reg_debug, Mux(insn_break, debugEntry.U, debugException.U), debugEntry.U)
val delegate = usingSupervisor.B && reg_mstatus.prv <= PRV.S.U && Mux(cause(xLen-1), read_mideleg(cause_deleg_lsbs), read_medeleg(cause_deleg_lsbs))
val delegateVS = reg_mstatus.v && delegate && Mux(cause(xLen-1), read_hideleg(cause_deleg_lsbs), read_hedeleg(cause_deleg_lsbs))
def mtvecBaseAlign = 2
def mtvecInterruptAlign = {
require(reg_mip.getWidth <= xLen)
log2Ceil(xLen)
}
val notDebugTVec = {
val base = Mux(delegate, Mux(delegateVS, read_vstvec, read_stvec), read_mtvec)
val interruptOffset = cause(mtvecInterruptAlign-1, 0) << mtvecBaseAlign
val interruptVec = Cat(base >> (mtvecInterruptAlign + mtvecBaseAlign), interruptOffset)
val doVector = base(0) && cause(cause.getWidth-1) && (cause_lsbs >> mtvecInterruptAlign) === 0.U
Mux(doVector, interruptVec, base >> mtvecBaseAlign << mtvecBaseAlign)
}
val causeIsRnmiInt = cause(xLen-1) && cause(xLen-2) && (cause_lsbs === CSR.rnmiIntCause.U || cause_lsbs === CSR.rnmiBEUCause.U)
val causeIsRnmiBEU = cause(xLen-1) && cause(xLen-2) && cause_lsbs === CSR.rnmiBEUCause.U
val causeIsNmi = causeIsRnmiInt
val nmiTVecInt = io.interrupts.nmi.map(nmi => nmi.rnmi_interrupt_vector).getOrElse(0.U)
val nmiTVecXcpt = io.interrupts.nmi.map(nmi => nmi.rnmi_exception_vector).getOrElse(0.U)
val trapToNmiInt = usingNMI.B && causeIsNmi
val trapToNmiXcpt = usingNMI.B && !nmie
val trapToNmi = trapToNmiInt || trapToNmiXcpt
val nmiTVec = (Mux(causeIsNmi, nmiTVecInt, nmiTVecXcpt)>>1)<<1
val tvec = Mux(trapToDebug, debugTVec, Mux(trapToNmi, nmiTVec, notDebugTVec))
io.evec := tvec
io.ptbr := reg_satp
io.hgatp := reg_hgatp
io.vsatp := reg_vsatp
io.eret := insn_call || insn_break || insn_ret
io.singleStep := reg_dcsr.step && !reg_debug
io.status := reg_mstatus
io.status.sd := io.status.fs.andR || io.status.xs.andR || io.status.vs.andR
io.status.debug := reg_debug
io.status.isa := reg_misa
io.status.uxl := (if (usingUser) log2Ceil(xLen) - 4 else 0).U
io.status.sxl := (if (usingSupervisor) log2Ceil(xLen) - 4 else 0).U
io.status.dprv := Mux(reg_mstatus.mprv && !reg_debug, reg_mstatus.mpp, reg_mstatus.prv)
io.status.dv := reg_mstatus.v || Mux(reg_mstatus.mprv && !reg_debug, reg_mstatus.mpv, false.B)
io.status.sd_rv32 := (xLen == 32).B && io.status.sd
io.status.mpv := reg_mstatus.mpv
io.status.gva := reg_mstatus.gva
io.hstatus := reg_hstatus
io.hstatus.vsxl := (if (usingSupervisor) log2Ceil(xLen) - 4 else 0).U
io.gstatus := reg_vsstatus
io.gstatus.sd := io.gstatus.fs.andR || io.gstatus.xs.andR || io.gstatus.vs.andR
io.gstatus.uxl := (if (usingUser) log2Ceil(xLen) - 4 else 0).U
io.gstatus.sd_rv32 := (xLen == 32).B && io.gstatus.sd
val exception = insn_call || insn_break || io.exception
assert(PopCount(insn_ret :: insn_call :: insn_break :: io.exception :: Nil) <= 1.U, "these conditions must be mutually exclusive")
when (insn_wfi && !io.singleStep && !reg_debug) { reg_wfi := true.B }
when (pending_interrupts.orR || io.interrupts.debug || exception) { reg_wfi := false.B }
io.interrupts.nmi.map(nmi => when (nmi.rnmi) { reg_wfi := false.B } )
when (io.retire(0) || exception) { reg_singleStepped := true.B }
when (!io.singleStep) { reg_singleStepped := false.B }
assert(!io.singleStep || io.retire <= 1.U)
assert(!reg_singleStepped || io.retire === 0.U)
val epc = formEPC(io.pc)
val tval = Mux(insn_break, epc, io.tval)
when (exception) {
when (trapToDebug) {
when (!reg_debug) {
reg_mstatus.v := false.B
reg_debug := true.B
reg_dpc := epc
reg_dcsr.cause := Mux(reg_singleStepped, 4.U, Mux(causeIsDebugInt, 3.U, Mux[UInt](causeIsDebugTrigger, 2.U, 1.U)))
reg_dcsr.prv := trimPrivilege(reg_mstatus.prv)
reg_dcsr.v := reg_mstatus.v
new_prv := PRV.M.U
}
}.elsewhen (trapToNmiInt) {
when (reg_rnmie) {
reg_mstatus.v := false.B
reg_mnstatus.mpv := reg_mstatus.v
reg_rnmie := false.B
reg_mnepc := epc
reg_mncause := (BigInt(1) << (xLen-1)).U | Mux(causeIsRnmiBEU, 3.U, 2.U)
reg_mnstatus.mpp := trimPrivilege(reg_mstatus.prv)
new_prv := PRV.M.U
}
}.elsewhen (delegateVS && nmie) {
reg_mstatus.v := true.B
reg_vsstatus.spp := reg_mstatus.prv
reg_vsepc := epc
reg_vscause := Mux(cause(xLen-1), Cat(cause(xLen-1, 2), 1.U(2.W)), cause)
reg_vstval := tval
reg_vsstatus.spie := reg_vsstatus.sie
reg_vsstatus.sie := false.B
new_prv := PRV.S.U
}.elsewhen (delegate && nmie) {
reg_mstatus.v := false.B
reg_hstatus.spvp := Mux(reg_mstatus.v, reg_mstatus.prv(0),reg_hstatus.spvp)
reg_hstatus.gva := io.gva
reg_hstatus.spv := reg_mstatus.v
reg_sepc := epc
reg_scause := cause
reg_stval := tval
reg_htval := io.htval
reg_htinst_read_pseudo := io.mhtinst_read_pseudo
reg_mstatus.spie := reg_mstatus.sie
reg_mstatus.spp := reg_mstatus.prv
reg_mstatus.sie := false.B
new_prv := PRV.S.U
}.otherwise {
reg_mstatus.v := false.B
reg_mstatus.mpv := reg_mstatus.v
reg_mstatus.gva := io.gva
reg_mepc := epc
reg_mcause := cause
reg_mtval := tval
reg_mtval2 := io.htval
reg_mtinst_read_pseudo := io.mhtinst_read_pseudo
reg_mstatus.mpie := reg_mstatus.mie
reg_mstatus.mpp := trimPrivilege(reg_mstatus.prv)
reg_mstatus.mie := false.B
new_prv := PRV.M.U
}
}
for (i <- 0 until supported_interrupts.getWidth) {
val en = exception && (supported_interrupts & (BigInt(1) << i).U) =/= 0.U && cause === (BigInt(1) << (xLen - 1)).U + i.U
val delegable = (delegable_interrupts & (BigInt(1) << i).U) =/= 0.U
property.cover(en && !delegate, s"INTERRUPT_M_$i")
property.cover(en && delegable && delegate, s"INTERRUPT_S_$i")
}
for (i <- 0 until xLen) {
val supported_exceptions: BigInt = 0x8fe |
(if (usingCompressed && !coreParams.misaWritable) 0 else 1) |
(if (usingUser) 0x100 else 0) |
(if (usingSupervisor) 0x200 else 0) |
(if (usingVM) 0xb000 else 0)
if (((supported_exceptions >> i) & 1) != 0) {
val en = exception && cause === i.U
val delegable = (delegable_exceptions & (BigInt(1) << i).U) =/= 0.U
property.cover(en && !delegate, s"EXCEPTION_M_$i")
property.cover(en && delegable && delegate, s"EXCEPTION_S_$i")
}
}
when (insn_ret) {
val ret_prv = WireInit(UInt(), DontCare)
when (usingSupervisor.B && !io.rw.addr(9)) {
when (!reg_mstatus.v) {
reg_mstatus.sie := reg_mstatus.spie
reg_mstatus.spie := true.B
reg_mstatus.spp := PRV.U.U
ret_prv := reg_mstatus.spp
reg_mstatus.v := usingHypervisor.B && reg_hstatus.spv
io.evec := readEPC(reg_sepc)
reg_hstatus.spv := false.B
}.otherwise {
reg_vsstatus.sie := reg_vsstatus.spie
reg_vsstatus.spie := true.B
reg_vsstatus.spp := PRV.U.U
ret_prv := reg_vsstatus.spp
reg_mstatus.v := usingHypervisor.B
io.evec := readEPC(reg_vsepc)
}
}.elsewhen (usingDebug.B && io.rw.addr(10) && io.rw.addr(7)) {
ret_prv := reg_dcsr.prv
reg_mstatus.v := usingHypervisor.B && reg_dcsr.v && reg_dcsr.prv <= PRV.S.U
reg_debug := false.B
io.evec := readEPC(reg_dpc)
}.elsewhen (usingNMI.B && io.rw.addr(10) && !io.rw.addr(7)) {
ret_prv := reg_mnstatus.mpp
reg_mstatus.v := usingHypervisor.B && reg_mnstatus.mpv && reg_mnstatus.mpp <= PRV.S.U
reg_rnmie := true.B
io.evec := readEPC(reg_mnepc)
}.otherwise {
reg_mstatus.mie := reg_mstatus.mpie
reg_mstatus.mpie := true.B
reg_mstatus.mpp := legalizePrivilege(PRV.U.U)
reg_mstatus.mpv := false.B
ret_prv := reg_mstatus.mpp
reg_mstatus.v := usingHypervisor.B && reg_mstatus.mpv && reg_mstatus.mpp <= PRV.S.U
io.evec := readEPC(reg_mepc)
}
new_prv := ret_prv
when (usingUser.B && ret_prv <= PRV.S.U) {
reg_mstatus.mprv := false.B
}
}
io.time := reg_cycle
io.csr_stall := reg_wfi || io.status.cease
io.status.cease := RegEnable(true.B, false.B, insn_cease)
io.status.wfi := reg_wfi
for ((io, reg) <- io.customCSRs zip reg_custom) {
io.wen := false.B
io.wdata := wdata
io.value := reg
}
for ((io, reg) <- io.roccCSRs zip reg_rocc) {
io.wen := false.B
io.wdata := wdata
io.value := reg
}
io.rw.rdata := Mux1H(for ((k, v) <- read_mapping) yield decoded_addr(k) -> v)
// cover access to register
val coverable_counters = read_mapping.filterNot { case (k, _) =>
k >= CSR.firstHPC + nPerfCounters && k < CSR.firstHPC + CSR.nHPM
}
coverable_counters.foreach( {case (k, v) => {
when (!k.U(11,10).andR) { // Cover points for RW CSR registers
property.cover(io.rw.cmd.isOneOf(CSR.W, CSR.S, CSR.C) && io.rw.addr===k.U, "CSR_access_"+k.toString, "Cover Accessing Core CSR field")
} .otherwise { // Cover points for RO CSR registers
property.cover(io.rw.cmd===CSR.R && io.rw.addr===k.U, "CSR_access_"+k.toString, "Cover Accessing Core CSR field")
}
}})
val set_vs_dirty = WireDefault(io.vector.map(_.set_vs_dirty).getOrElse(false.B))
io.vector.foreach { vio =>
when (set_vs_dirty) {
assert(reg_mstatus.vs > 0.U)
when (reg_mstatus.v) { reg_vsstatus.vs := 3.U }
reg_mstatus.vs := 3.U
}
}
val set_fs_dirty = WireDefault(io.set_fs_dirty.getOrElse(false.B))
if (coreParams.haveFSDirty) {
when (set_fs_dirty) {
assert(reg_mstatus.fs > 0.U)
when (reg_mstatus.v) { reg_vsstatus.fs := 3.U }
reg_mstatus.fs := 3.U
}
}
io.fcsr_rm := reg_frm
when (io.fcsr_flags.valid) {
reg_fflags := reg_fflags | io.fcsr_flags.bits
set_fs_dirty := true.B
}
io.vector.foreach { vio =>
when (vio.set_vxsat) {
reg_vxsat.get := true.B
set_vs_dirty := true.B
}
}
val csr_wen = io.rw.cmd.isOneOf(CSR.S, CSR.C, CSR.W) && !io.rw_stall
io.csrw_counter := Mux(coreParams.haveBasicCounters.B && csr_wen && (io.rw.addr.inRange(CSRs.mcycle.U, (CSRs.mcycle + CSR.nCtr).U) || io.rw.addr.inRange(CSRs.mcycleh.U, (CSRs.mcycleh + CSR.nCtr).U)), UIntToOH(io.rw.addr(log2Ceil(CSR.nCtr+nPerfCounters)-1, 0)), 0.U)
when (csr_wen) {
val scause_mask = ((BigInt(1) << (xLen-1)) + 31).U /* only implement 5 LSBs and MSB */
val satp_valid_modes = 0 +: (minPgLevels to pgLevels).map(new PTBR().pgLevelsToMode(_))
when (decoded_addr(CSRs.mstatus)) {
val new_mstatus = wdata.asTypeOf(new MStatus())
reg_mstatus.mie := new_mstatus.mie
reg_mstatus.mpie := new_mstatus.mpie
if (usingUser) {
reg_mstatus.mprv := new_mstatus.mprv
reg_mstatus.mpp := legalizePrivilege(new_mstatus.mpp)
if (usingSupervisor) {
reg_mstatus.spp := new_mstatus.spp
reg_mstatus.spie := new_mstatus.spie
reg_mstatus.sie := new_mstatus.sie
reg_mstatus.tw := new_mstatus.tw
reg_mstatus.tsr := new_mstatus.tsr
}
if (usingVM) {
reg_mstatus.mxr := new_mstatus.mxr
reg_mstatus.sum := new_mstatus.sum
reg_mstatus.tvm := new_mstatus.tvm
}
if (usingHypervisor) {
reg_mstatus.mpv := new_mstatus.mpv
reg_mstatus.gva := new_mstatus.gva
}
}
if (usingSupervisor || usingFPU) reg_mstatus.fs := formFS(new_mstatus.fs)
reg_mstatus.vs := formVS(new_mstatus.vs)
}
when (decoded_addr(CSRs.misa)) {
val mask = isaStringToMask(isaMaskString).U(xLen.W)
val f = wdata('f' - 'a')
// suppress write if it would cause the next fetch to be misaligned
when (!usingCompressed.B || !io.pc(1) || wdata('c' - 'a')) {
if (coreParams.misaWritable)
reg_misa := ~(~wdata | (!f << ('d' - 'a'))) & mask | reg_misa & ~mask
}
}
when (decoded_addr(CSRs.mip)) {
// MIP should be modified based on the value in reg_mip, not the value
// in read_mip, since read_mip.seip is the OR of reg_mip.seip and
// io.interrupts.seip. We don't want the value on the PLIC line to
// inadvertently be OR'd into read_mip.seip.
val new_mip = readModifyWriteCSR(io.rw.cmd, reg_mip.asUInt, io.rw.wdata).asTypeOf(new MIP)
if (usingSupervisor) {
reg_mip.ssip := new_mip.ssip
reg_mip.stip := new_mip.stip
reg_mip.seip := new_mip.seip
}
if (usingHypervisor) {
reg_mip.vssip := new_mip.vssip
}
}
when (decoded_addr(CSRs.mie)) { reg_mie := wdata & supported_interrupts }
when (decoded_addr(CSRs.mepc)) { reg_mepc := formEPC(wdata) }
when (decoded_addr(CSRs.mscratch)) { reg_mscratch := wdata }
if (mtvecWritable)
when (decoded_addr(CSRs.mtvec)) { reg_mtvec := wdata }
when (decoded_addr(CSRs.mcause)) { reg_mcause := wdata & ((BigInt(1) << (xLen-1)) + (BigInt(1) << whichInterrupt.getWidth) - 1).U }
when (decoded_addr(CSRs.mtval)) { reg_mtval := wdata }
if (usingNMI) {
val new_mnstatus = wdata.asTypeOf(new MNStatus())
when (decoded_addr(CustomCSRs.mnscratch)) { reg_mnscratch := wdata }
when (decoded_addr(CustomCSRs.mnepc)) { reg_mnepc := formEPC(wdata) }
when (decoded_addr(CustomCSRs.mncause)) { reg_mncause := wdata & ((BigInt(1) << (xLen-1)) + BigInt(3)).U }
when (decoded_addr(CustomCSRs.mnstatus)) {
reg_mnstatus.mpp := legalizePrivilege(new_mnstatus.mpp)
reg_mnstatus.mpv := usingHypervisor.B && new_mnstatus.mpv
reg_rnmie := reg_rnmie | new_mnstatus.mie // mnie bit settable but not clearable from software
}
}
for (((e, c), i) <- (reg_hpmevent zip reg_hpmcounter).zipWithIndex) {
writeCounter(i + CSR.firstMHPC, c, wdata)
when (decoded_addr(i + CSR.firstHPE)) { e := perfEventSets.maskEventSelector(wdata) }
}
if (coreParams.haveBasicCounters) {
when (decoded_addr(CSRs.mcountinhibit)) { reg_mcountinhibit := wdata & ~2.U(xLen.W) } // mcountinhibit bit [1] is tied zero
writeCounter(CSRs.mcycle, reg_cycle, wdata)
writeCounter(CSRs.minstret, reg_instret, wdata)
}
if (usingFPU) {
when (decoded_addr(CSRs.fflags)) { set_fs_dirty := true.B; reg_fflags := wdata }
when (decoded_addr(CSRs.frm)) { set_fs_dirty := true.B; reg_frm := wdata }
when (decoded_addr(CSRs.fcsr)) {
set_fs_dirty := true.B
reg_fflags := wdata
reg_frm := wdata >> reg_fflags.getWidth
}
}
if (usingDebug) {
when (decoded_addr(CSRs.dcsr)) {
val new_dcsr = wdata.asTypeOf(new DCSR())
reg_dcsr.step := new_dcsr.step
reg_dcsr.ebreakm := new_dcsr.ebreakm
if (usingSupervisor) reg_dcsr.ebreaks := new_dcsr.ebreaks
if (usingUser) reg_dcsr.ebreaku := new_dcsr.ebreaku
if (usingUser) reg_dcsr.prv := legalizePrivilege(new_dcsr.prv)
if (usingHypervisor) reg_dcsr.v := new_dcsr.v
}
when (decoded_addr(CSRs.dpc)) { reg_dpc := formEPC(wdata) }
when (decoded_addr(CSRs.dscratch0)) { reg_dscratch0 := wdata }
reg_dscratch1.foreach { r =>
when (decoded_addr(CSRs.dscratch1)) { r := wdata }
}
}
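    // Supervisor CSRs: sstatus writes alias the S-level fields of mstatus, while
    // sip/sie writes are masked by the delegation state (read_mideleg / sie_mask)
    // so S-mode only affects its own interrupt bits.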
if (usingSupervisor) {
when (decoded_addr(CSRs.sstatus)) {
val new_sstatus = wdata.asTypeOf(new MStatus())
reg_mstatus.sie := new_sstatus.sie
reg_mstatus.spie := new_sstatus.spie
reg_mstatus.spp := new_sstatus.spp
reg_mstatus.fs := formFS(new_sstatus.fs)
reg_mstatus.vs := formVS(new_sstatus.vs)
if (usingVM) {
reg_mstatus.mxr := new_sstatus.mxr
reg_mstatus.sum := new_sstatus.sum
}
}
when (decoded_addr(CSRs.sip)) {
val new_sip = ((read_mip & ~read_mideleg) | (wdata & read_mideleg)).asTypeOf(new MIP())
reg_mip.ssip := new_sip.ssip
}
when (decoded_addr(CSRs.satp)) {
if (usingVM) {
val new_satp = wdata.asTypeOf(new PTBR())
when (new_satp.mode.isOneOf(satp_valid_modes.map(_.U))) {
reg_satp.mode := new_satp.mode & satp_valid_modes.reduce(_|_).U
reg_satp.ppn := new_satp.ppn(ppnBits-1,0)
if (asIdBits > 0) reg_satp.asid := new_satp.asid(asIdBits-1,0)
}
}
}
when (decoded_addr(CSRs.sie)) { reg_mie := (reg_mie & ~sie_mask) | (wdata & sie_mask) }
when (decoded_addr(CSRs.sscratch)) { reg_sscratch := wdata }
when (decoded_addr(CSRs.sepc)) { reg_sepc := formEPC(wdata) }
when (decoded_addr(CSRs.stvec)) { reg_stvec := wdata }
when (decoded_addr(CSRs.scause)) { reg_scause := wdata & scause_mask }
when (decoded_addr(CSRs.stval)) { reg_stval := wdata }
when (decoded_addr(CSRs.mideleg)) { reg_mideleg := wdata }
when (decoded_addr(CSRs.medeleg)) { reg_medeleg := wdata }
when (decoded_addr(CSRs.scounteren)) { reg_scounteren := wdata }
when (decoded_addr(CSRs.senvcfg)) { reg_senvcfg.write(wdata) }
}
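    // Hypervisor CSRs: hip/hvip/hie only touch the VS-level bits of mip/mie
    // (masked by hs_delegable_interrupts), and hgatp/vsatp mode writes take
    // effect only when they select a supported page-table mode.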
if (usingHypervisor) {
when (decoded_addr(CSRs.hstatus)) {
val new_hstatus = wdata.asTypeOf(new HStatus())
reg_hstatus.gva := new_hstatus.gva
reg_hstatus.spv := new_hstatus.spv
reg_hstatus.spvp := new_hstatus.spvp
reg_hstatus.hu := new_hstatus.hu
reg_hstatus.vtvm := new_hstatus.vtvm
reg_hstatus.vtw := new_hstatus.vtw
reg_hstatus.vtsr := new_hstatus.vtsr
reg_hstatus.vsxl := new_hstatus.vsxl
}
when (decoded_addr(CSRs.hideleg)) { reg_hideleg := wdata }
when (decoded_addr(CSRs.hedeleg)) { reg_hedeleg := wdata }
when (decoded_addr(CSRs.hgatp)) {
val new_hgatp = wdata.asTypeOf(new PTBR())
val valid_modes = 0 +: (minPgLevels to pgLevels).map(new_hgatp.pgLevelsToMode(_))
when (new_hgatp.mode.isOneOf(valid_modes.map(_.U))) {
reg_hgatp.mode := new_hgatp.mode & valid_modes.reduce(_|_).U
}
reg_hgatp.ppn := Cat(new_hgatp.ppn(ppnBits-1,2), 0.U(2.W))
if (vmIdBits > 0) reg_hgatp.asid := new_hgatp.asid(vmIdBits-1,0)
}
when (decoded_addr(CSRs.hip)) {
val new_hip = ((read_mip & ~hs_delegable_interrupts) | (wdata & hs_delegable_interrupts)).asTypeOf(new MIP())
reg_mip.vssip := new_hip.vssip
}
when (decoded_addr(CSRs.hie)) { reg_mie := (reg_mie & ~hs_delegable_interrupts) | (wdata & hs_delegable_interrupts) }
when (decoded_addr(CSRs.hvip)) {
val new_sip = ((read_mip & ~hs_delegable_interrupts) | (wdata & hs_delegable_interrupts)).asTypeOf(new MIP())
reg_mip.vssip := new_sip.vssip
reg_mip.vstip := new_sip.vstip
reg_mip.vseip := new_sip.vseip
}
when (decoded_addr(CSRs.hcounteren)) { reg_hcounteren := wdata }
when (decoded_addr(CSRs.htval)) { reg_htval := wdata }
when (decoded_addr(CSRs.mtval2)) { reg_mtval2 := wdata }
val write_mhtinst_read_pseudo = wdata(13) && (xLen == 32).option(true.B).getOrElse(wdata(12))
when(decoded_addr(CSRs.mtinst)) { reg_mtinst_read_pseudo := write_mhtinst_read_pseudo }
when(decoded_addr(CSRs.htinst)) { reg_htinst_read_pseudo := write_mhtinst_read_pseudo }
when (decoded_addr(CSRs.vsstatus)) {
val new_vsstatus = wdata.asTypeOf(new MStatus())
reg_vsstatus.sie := new_vsstatus.sie
reg_vsstatus.spie := new_vsstatus.spie
reg_vsstatus.spp := new_vsstatus.spp
reg_vsstatus.mxr := new_vsstatus.mxr
reg_vsstatus.sum := new_vsstatus.sum
reg_vsstatus.fs := formFS(new_vsstatus.fs)
reg_vsstatus.vs := formVS(new_vsstatus.vs)
}
when (decoded_addr(CSRs.vsip)) {
val new_vsip = ((read_hip & ~read_hideleg) | ((wdata << 1) & read_hideleg)).asTypeOf(new MIP())
reg_mip.vssip := new_vsip.vssip
}
when (decoded_addr(CSRs.vsatp)) {
val new_vsatp = wdata.asTypeOf(new PTBR())
val mode_ok = new_vsatp.mode.isOneOf(satp_valid_modes.map(_.U))
when (mode_ok) {
reg_vsatp.mode := new_vsatp.mode & satp_valid_modes.reduce(_|_).U
}
when (mode_ok || !reg_mstatus.v) {
reg_vsatp.ppn := new_vsatp.ppn(vpnBits.min(new_vsatp.ppn.getWidth)-1,0)
if (asIdBits > 0) reg_vsatp.asid := new_vsatp.asid(asIdBits-1,0)
}
}
when (decoded_addr(CSRs.vsie)) { reg_mie := (reg_mie & ~read_hideleg) | ((wdata << 1) & read_hideleg) }
when (decoded_addr(CSRs.vsscratch)) { reg_vsscratch := wdata }
when (decoded_addr(CSRs.vsepc)) { reg_vsepc := formEPC(wdata) }
when (decoded_addr(CSRs.vstvec)) { reg_vstvec := wdata }
when (decoded_addr(CSRs.vscause)) { reg_vscause := wdata & scause_mask }
when (decoded_addr(CSRs.vstval)) { reg_vstval := wdata }
when (decoded_addr(CSRs.henvcfg)) { reg_henvcfg.write(wdata) }
}
if (usingUser) {
when (decoded_addr(CSRs.mcounteren)) { reg_mcounteren := wdata }
when (decoded_addr(CSRs.menvcfg)) { reg_menvcfg.write(wdata) }
}
if (nBreakpoints > 0) {
when (decoded_addr(CSRs.tselect)) { reg_tselect := wdata }
for ((bp, i) <- reg_bp.zipWithIndex) {
when (i.U === reg_tselect && (!bp.control.dmode || reg_debug)) {
when (decoded_addr(CSRs.tdata2)) { bp.address := wdata }
when (decoded_addr(CSRs.tdata3)) {
if (coreParams.mcontextWidth > 0) {
bp.textra.mselect := wdata(bp.textra.mselectPos)
bp.textra.mvalue := wdata >> bp.textra.mvaluePos
}
if (coreParams.scontextWidth > 0) {
bp.textra.sselect := wdata(bp.textra.sselectPos)
bp.textra.svalue := wdata >> bp.textra.svaluePos
}
}
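        // tdata1 writes are legalized: dmode is settable only from debug mode and
        // only where it does not split an existing chain, action is kept only when
        // dmode is set or the requested action exceeds 1 (otherwise it falls back
        // to 0), and chain is cleared whenever it would overlap a neighbouring
        // chain or cross the debug-mode boundary.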
when (decoded_addr(CSRs.tdata1)) {
bp.control := wdata.asTypeOf(bp.control)
val prevChain = if (i == 0) false.B else reg_bp(i-1).control.chain
val prevDMode = if (i == 0) false.B else reg_bp(i-1).control.dmode
val nextChain = if (i >= nBreakpoints-1) true.B else reg_bp(i+1).control.chain
val nextDMode = if (i >= nBreakpoints-1) true.B else reg_bp(i+1).control.dmode
val newBPC = readModifyWriteCSR(io.rw.cmd, bp.control.asUInt, io.rw.wdata).asTypeOf(bp.control)
val dMode = newBPC.dmode && reg_debug && (prevDMode || !prevChain)
bp.control.dmode := dMode
when (dMode || (newBPC.action > 1.U)) { bp.control.action := newBPC.action }.otherwise { bp.control.action := 0.U }
bp.control.chain := newBPC.chain && !(prevChain || nextChain) && (dMode || !nextDMode)
}
}
}
}
reg_mcontext.foreach { r => when (decoded_addr(CSRs.mcontext)) { r := wdata }}
reg_scontext.foreach { r => when (decoded_addr(CSRs.scontext)) { r := wdata }}
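    // PMP CSRs: pmpcfg writes are ignored for locked entries, W is cleared unless R
    // is also set, and NA4 is demoted to NAPOT when the granularity is coarser than
    // 4 bytes; pmpaddr writes are gated by addrLocked(next), which also takes the
    // following entry into account.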
if (reg_pmp.nonEmpty) for (((pmp, next), i) <- (reg_pmp zip (reg_pmp.tail :+ reg_pmp.last)).zipWithIndex) {
require(xLen % pmp.cfg.getWidth == 0)
when (decoded_addr(CSRs.pmpcfg0 + pmpCfgIndex(i)) && !pmp.cfgLocked) {
val newCfg = (wdata >> ((i * pmp.cfg.getWidth) % xLen)).asTypeOf(new PMPConfig())
pmp.cfg := newCfg
// disallow unreadable but writable PMPs
pmp.cfg.w := newCfg.w && newCfg.r
// can't select a=NA4 with coarse-grained PMPs
if (pmpGranularity.log2 > PMP.lgAlign)
pmp.cfg.a := Cat(newCfg.a(1), newCfg.a.orR)
}
when (decoded_addr(CSRs.pmpaddr0 + i) && !pmp.addrLocked(next)) {
pmp.addr := wdata
}
}
def writeCustomCSR(io: CustomCSRIO, csr: CustomCSR, reg: UInt) = {
val mask = csr.mask.U(xLen.W)
when (decoded_addr(csr.id)) {
reg := (wdata & mask) | (reg & ~mask)
io.wen := true.B
}
}
for ((io, csr, reg) <- (io.customCSRs, customCSRs, reg_custom).zipped) {
writeCustomCSR(io, csr, reg)
}
for ((io, csr, reg) <- (io.roccCSRs, roccCSRs, reg_rocc).zipped) {
writeCustomCSR(io, csr, reg)
}
if (usingVector) {
when (decoded_addr(CSRs.vstart)) { set_vs_dirty := true.B; reg_vstart.get := wdata }
when (decoded_addr(CSRs.vxrm)) { set_vs_dirty := true.B; reg_vxrm.get := wdata }
when (decoded_addr(CSRs.vxsat)) { set_vs_dirty := true.B; reg_vxsat.get := wdata }
when (decoded_addr(CSRs.vcsr)) {
set_vs_dirty := true.B
reg_vxsat.get := wdata
reg_vxrm.get := wdata >> 1
}
}
}
def setCustomCSR(io: CustomCSRIO, csr: CustomCSR, reg: UInt) = {
val mask = csr.mask.U(xLen.W)
when (io.set) {
reg := (io.sdata & mask) | (reg & ~mask)
}
}
for ((io, csr, reg) <- (io.customCSRs, customCSRs, reg_custom).zipped) {
setCustomCSR(io, csr, reg)
}
for ((io, csr, reg) <- (io.roccCSRs, roccCSRs, reg_rocc).zipped) {
setCustomCSR(io, csr, reg)
}
io.vector.map { vio =>
when (vio.set_vconfig.valid) {
// user of CSRFile is responsible for set_vs_dirty in this case
assert(vio.set_vconfig.bits.vl <= vio.set_vconfig.bits.vtype.vlMax)
reg_vconfig.get := vio.set_vconfig.bits
}
when (vio.set_vstart.valid) {
set_vs_dirty := true.B
reg_vstart.get := vio.set_vstart.bits
}
vio.vstart := reg_vstart.get
vio.vconfig := reg_vconfig.get
vio.vxrm := reg_vxrm.get
when (reset.asBool) {
reg_vconfig.get.vl := 0.U
reg_vconfig.get.vtype := 0.U.asTypeOf(new VType)
reg_vconfig.get.vtype.vill := true.B
}
}
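  // Tie off state for absent features: the translation mode fields reset to Bare,
  // and without VM / hypervisor / ASID-VMID support the corresponding satp, vsatp
  // and hgatp fields are driven to constant zero.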
when(reset.asBool) {
reg_satp.mode := 0.U
reg_vsatp.mode := 0.U
reg_hgatp.mode := 0.U
}
if (!usingVM) {
reg_satp.mode := 0.U
reg_satp.ppn := 0.U
reg_satp.asid := 0.U
}
if (!usingHypervisor) {
reg_vsatp.mode := 0.U
reg_vsatp.ppn := 0.U
reg_vsatp.asid := 0.U
reg_hgatp.mode := 0.U
reg_hgatp.ppn := 0.U
reg_hgatp.asid := 0.U
}
if (!(asIdBits > 0)) {
reg_satp.asid := 0.U
reg_vsatp.asid := 0.U
}
if (!(vmIdBits > 0)) {
reg_hgatp.asid := 0.U
}
reg_vsstatus.xs := (if (usingRoCC) 3.U else 0.U)
if (nBreakpoints <= 1) reg_tselect := 0.U
for (bpc <- reg_bp map {_.control}) {
bpc.ttype := bpc.tType.U
bpc.maskmax := bpc.maskMax.U
bpc.reserved := 0.U
bpc.zero := 0.U
bpc.h := false.B
if (!usingSupervisor) bpc.s := false.B
if (!usingUser) bpc.u := false.B
if (!usingSupervisor && !usingUser) bpc.m := true.B
when (reset.asBool) {
bpc.action := 0.U
bpc.dmode := false.B
bpc.chain := false.B
bpc.r := false.B
bpc.w := false.B
bpc.x := false.B
}
}
for (bpx <- reg_bp map {_.textra}) {
if (coreParams.mcontextWidth == 0) bpx.mselect := false.B
if (coreParams.scontextWidth == 0) bpx.sselect := false.B
}
for (bp <- reg_bp drop nBreakpoints)
bp := 0.U.asTypeOf(new BP())
for (pmp <- reg_pmp) {
pmp.cfg.res := 0.U
when (reset.asBool) { pmp.reset() }
}
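  // Per-retirement trace port: slot i is valid when more than i instructions
  // retire this cycle (or it carries the exception), and reports the pc,
  // privilege, cause and tval alongside the raw instruction bits.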
for (((t, insn), i) <- (io.trace zip io.inst).zipWithIndex) {
t.exception := io.retire >= i.U && exception
t.valid := io.retire > i.U || t.exception
t.insn := insn
t.iaddr := io.pc
t.priv := Cat(reg_debug, reg_mstatus.prv)
t.cause := cause
t.interrupt := cause(xLen-1)
t.tval := io.tval
t.wdata.foreach(_ := DontCare)
}
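  // Interrupt selection: non-standard (local) interrupts, highest bit first, take
  // priority over the standard ones; within the standard set the order is
  // M > S > VS > U, with external before software before timer at each level.
  // masksIn is reversed, so masks passed later take precedence over earlier ones.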
def chooseInterrupt(masksIn: Seq[UInt]): (Bool, UInt) = {
val nonstandard = supported_interrupts.getWidth-1 to 12 by -1
// MEI, MSI, MTI, SEI, SSI, STI, VSEI, VSSI, VSTI, UEI, USI, UTI
val standard = Seq(11, 3, 7, 9, 1, 5, 10, 2, 6, 8, 0, 4)
val priority = nonstandard ++ standard
val masks = masksIn.reverse
val any = masks.flatMap(m => priority.filter(_ < m.getWidth).map(i => m(i))).reduce(_||_)
val which = PriorityMux(masks.flatMap(m => priority.filter(_ < m.getWidth).map(i => (m(i), i.U))))
(any, which)
}
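  // Read-modify-write in one expression: cmd(1) folds the old value in (the
  // set/clear forms) and cmd(1,0) both set turns wdata into a clear mask, so a
  // plain write yields wdata, a set yields rdata | wdata, and a clear yields
  // (rdata | wdata) & ~wdata = rdata & ~wdata.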
def readModifyWriteCSR(cmd: UInt, rdata: UInt, wdata: UInt) = {
(Mux(cmd(1), rdata, 0.U) | wdata) & ~Mux(cmd(1,0).andR, wdata, 0.U)
}
def legalizePrivilege(priv: UInt): UInt =
if (usingSupervisor) Mux(priv === PRV.H.U, PRV.U.U, priv)
else if (usingUser) Fill(2, priv(0))
else PRV.M.U
def trimPrivilege(priv: UInt): UInt =
if (usingSupervisor) priv
else legalizePrivilege(priv)
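  // Counter writes: on RV32 each 64-bit counter is exposed as a low/high CSR pair
  // (e.g. mcycle / mcycleh) and each half is patched independently; on RV64 a
  // single write replaces the whole counter.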
def writeCounter(lo: Int, ctr: WideCounter, wdata: UInt) = {
if (xLen == 32) {
val hi = lo + CSRs.mcycleh - CSRs.mcycle
when (decoded_addr(lo)) { ctr := Cat(ctr(ctr.getWidth-1, 32), wdata) }
when (decoded_addr(hi)) { ctr := Cat(wdata(ctr.getWidth-33, 0), ctr(31, 0)) }
} else {
when (decoded_addr(lo)) { ctr := wdata(ctr.getWidth-1, 0) }
}
}
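  // Address legalization helpers: formEPC clears the low bit (or the low two bits
  // without compressed instructions) of a written epc, readEPC re-applies that
  // mask based on the live misa.C bit, and formTVec enforces the alignment
  // required by the selected direct or vectored trap-vector mode.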
def formEPC(x: UInt) = ~(~x | (if (usingCompressed) 1.U else 3.U))
def readEPC(x: UInt) = ~(~x | Mux(reg_misa('c' - 'a'), 1.U, 3.U))
def formTVec(x: UInt) = x andNot Mux(x(0), ((((BigInt(1) << mtvecInterruptAlign) - 1) << mtvecBaseAlign) | 2).U, 2.U)
def isaStringToMask(s: String) = s.map(x => 1 << (x - 'A')).foldLeft(0)(_|_)
def formFS(fs: UInt) = if (coreParams.haveFSDirty) fs else Fill(2, fs.orR)
def formVS(vs: UInt) = if (usingVector) vs else 0.U
}
module CSRFile( // @[CSR.scala:377:7]
input clock, // @[CSR.scala:377:7]
input reset, // @[CSR.scala:377:7]
input io_ungated_clock, // @[CSR.scala:384:14]
input io_interrupts_debug, // @[CSR.scala:384:14]
input io_interrupts_mtip, // @[CSR.scala:384:14]
input io_interrupts_msip, // @[CSR.scala:384:14]
input io_interrupts_meip, // @[CSR.scala:384:14]
input io_interrupts_seip, // @[CSR.scala:384:14]
input [2:0] io_hartid, // @[CSR.scala:384:14]
input [11:0] io_rw_addr, // @[CSR.scala:384:14]
input [2:0] io_rw_cmd, // @[CSR.scala:384:14]
output [63:0] io_rw_rdata, // @[CSR.scala:384:14]
input [63:0] io_rw_wdata, // @[CSR.scala:384:14]
input [31:0] io_decode_0_inst, // @[CSR.scala:384:14]
output io_decode_0_fp_illegal, // @[CSR.scala:384:14]
output io_decode_0_fp_csr, // @[CSR.scala:384:14]
output io_decode_0_read_illegal, // @[CSR.scala:384:14]
output io_decode_0_write_illegal, // @[CSR.scala:384:14]
output io_decode_0_write_flush, // @[CSR.scala:384:14]
output io_decode_0_system_illegal, // @[CSR.scala:384:14]
output io_decode_0_virtual_access_illegal, // @[CSR.scala:384:14]
output io_decode_0_virtual_system_illegal, // @[CSR.scala:384:14]
output io_csr_stall, // @[CSR.scala:384:14]
output io_eret, // @[CSR.scala:384:14]
output io_singleStep, // @[CSR.scala:384:14]
output io_status_debug, // @[CSR.scala:384:14]
output io_status_wfi, // @[CSR.scala:384:14]
output [31:0] io_status_isa, // @[CSR.scala:384:14]
output [1:0] io_status_dprv, // @[CSR.scala:384:14]
output io_status_dv, // @[CSR.scala:384:14]
output [1:0] io_status_prv, // @[CSR.scala:384:14]
output io_status_v, // @[CSR.scala:384:14]
output io_status_mxr, // @[CSR.scala:384:14]
output io_status_sum, // @[CSR.scala:384:14]
output [3:0] io_ptbr_mode, // @[CSR.scala:384:14]
output [43:0] io_ptbr_ppn, // @[CSR.scala:384:14]
output [39:0] io_evec, // @[CSR.scala:384:14]
input io_exception, // @[CSR.scala:384:14]
input io_retire, // @[CSR.scala:384:14]
input [63:0] io_cause, // @[CSR.scala:384:14]
input [39:0] io_pc, // @[CSR.scala:384:14]
input [39:0] io_tval, // @[CSR.scala:384:14]
input io_gva, // @[CSR.scala:384:14]
output [63:0] io_time, // @[CSR.scala:384:14]
output [2:0] io_fcsr_rm, // @[CSR.scala:384:14]
input io_fcsr_flags_valid, // @[CSR.scala:384:14]
input [4:0] io_fcsr_flags_bits, // @[CSR.scala:384:14]
output io_interrupt, // @[CSR.scala:384:14]
output [63:0] io_interrupt_cause, // @[CSR.scala:384:14]
output io_bp_0_control_action, // @[CSR.scala:384:14]
output [1:0] io_bp_0_control_tmatch, // @[CSR.scala:384:14]
output io_bp_0_control_m, // @[CSR.scala:384:14]
output io_bp_0_control_s, // @[CSR.scala:384:14]
output io_bp_0_control_u, // @[CSR.scala:384:14]
output io_bp_0_control_x, // @[CSR.scala:384:14]
output io_bp_0_control_w, // @[CSR.scala:384:14]
output io_bp_0_control_r, // @[CSR.scala:384:14]
output [38:0] io_bp_0_address, // @[CSR.scala:384:14]
output io_pmp_0_cfg_l, // @[CSR.scala:384:14]
output [1:0] io_pmp_0_cfg_a, // @[CSR.scala:384:14]
output io_pmp_0_cfg_x, // @[CSR.scala:384:14]
output io_pmp_0_cfg_w, // @[CSR.scala:384:14]
output io_pmp_0_cfg_r, // @[CSR.scala:384:14]
output [29:0] io_pmp_0_addr, // @[CSR.scala:384:14]
output [31:0] io_pmp_0_mask, // @[CSR.scala:384:14]
output io_pmp_1_cfg_l, // @[CSR.scala:384:14]
output [1:0] io_pmp_1_cfg_a, // @[CSR.scala:384:14]
output io_pmp_1_cfg_x, // @[CSR.scala:384:14]
output io_pmp_1_cfg_w, // @[CSR.scala:384:14]
output io_pmp_1_cfg_r, // @[CSR.scala:384:14]
output [29:0] io_pmp_1_addr, // @[CSR.scala:384:14]
output [31:0] io_pmp_1_mask, // @[CSR.scala:384:14]
output io_pmp_2_cfg_l, // @[CSR.scala:384:14]
output [1:0] io_pmp_2_cfg_a, // @[CSR.scala:384:14]
output io_pmp_2_cfg_x, // @[CSR.scala:384:14]
output io_pmp_2_cfg_w, // @[CSR.scala:384:14]
output io_pmp_2_cfg_r, // @[CSR.scala:384:14]
output [29:0] io_pmp_2_addr, // @[CSR.scala:384:14]
output [31:0] io_pmp_2_mask, // @[CSR.scala:384:14]
output io_pmp_3_cfg_l, // @[CSR.scala:384:14]
output [1:0] io_pmp_3_cfg_a, // @[CSR.scala:384:14]
output io_pmp_3_cfg_x, // @[CSR.scala:384:14]
output io_pmp_3_cfg_w, // @[CSR.scala:384:14]
output io_pmp_3_cfg_r, // @[CSR.scala:384:14]
output [29:0] io_pmp_3_addr, // @[CSR.scala:384:14]
output [31:0] io_pmp_3_mask, // @[CSR.scala:384:14]
output io_pmp_4_cfg_l, // @[CSR.scala:384:14]
output [1:0] io_pmp_4_cfg_a, // @[CSR.scala:384:14]
output io_pmp_4_cfg_x, // @[CSR.scala:384:14]
output io_pmp_4_cfg_w, // @[CSR.scala:384:14]
output io_pmp_4_cfg_r, // @[CSR.scala:384:14]
output [29:0] io_pmp_4_addr, // @[CSR.scala:384:14]
output [31:0] io_pmp_4_mask, // @[CSR.scala:384:14]
output io_pmp_5_cfg_l, // @[CSR.scala:384:14]
output [1:0] io_pmp_5_cfg_a, // @[CSR.scala:384:14]
output io_pmp_5_cfg_x, // @[CSR.scala:384:14]
output io_pmp_5_cfg_w, // @[CSR.scala:384:14]
output io_pmp_5_cfg_r, // @[CSR.scala:384:14]
output [29:0] io_pmp_5_addr, // @[CSR.scala:384:14]
output [31:0] io_pmp_5_mask, // @[CSR.scala:384:14]
output io_pmp_6_cfg_l, // @[CSR.scala:384:14]
output [1:0] io_pmp_6_cfg_a, // @[CSR.scala:384:14]
output io_pmp_6_cfg_x, // @[CSR.scala:384:14]
output io_pmp_6_cfg_w, // @[CSR.scala:384:14]
output io_pmp_6_cfg_r, // @[CSR.scala:384:14]
output [29:0] io_pmp_6_addr, // @[CSR.scala:384:14]
output [31:0] io_pmp_6_mask, // @[CSR.scala:384:14]
output io_pmp_7_cfg_l, // @[CSR.scala:384:14]
output [1:0] io_pmp_7_cfg_a, // @[CSR.scala:384:14]
output io_pmp_7_cfg_x, // @[CSR.scala:384:14]
output io_pmp_7_cfg_w, // @[CSR.scala:384:14]
output io_pmp_7_cfg_r, // @[CSR.scala:384:14]
output [29:0] io_pmp_7_addr, // @[CSR.scala:384:14]
output [31:0] io_pmp_7_mask, // @[CSR.scala:384:14]
output io_inhibit_cycle, // @[CSR.scala:384:14]
input [31:0] io_inst_0, // @[CSR.scala:384:14]
output io_trace_0_valid, // @[CSR.scala:384:14]
output [39:0] io_trace_0_iaddr, // @[CSR.scala:384:14]
output [31:0] io_trace_0_insn, // @[CSR.scala:384:14]
output io_trace_0_exception, // @[CSR.scala:384:14]
output [63:0] io_customCSRs_0_value // @[CSR.scala:384:14]
);
wire [63:0] _io_rw_rdata_T_298; // @[Mux.scala:30:73]
reg io_status_cease_r; // @[CSR.scala:1162:31]
wire io_singleStep_0; // @[CSR.scala:1001:34]
reg [1:0] reg_mstatus_prv; // @[CSR.scala:395:28]
reg reg_mstatus_v; // @[CSR.scala:395:28]
reg reg_mstatus_mpv; // @[CSR.scala:395:28]
reg reg_mstatus_gva; // @[CSR.scala:395:28]
reg reg_mstatus_tsr; // @[CSR.scala:395:28]
reg reg_mstatus_tw; // @[CSR.scala:395:28]
reg reg_mstatus_tvm; // @[CSR.scala:395:28]
reg reg_mstatus_mxr; // @[CSR.scala:395:28]
reg reg_mstatus_sum; // @[CSR.scala:395:28]
reg reg_mstatus_mprv; // @[CSR.scala:395:28]
reg [1:0] reg_mstatus_fs; // @[CSR.scala:395:28]
reg [1:0] reg_mstatus_mpp; // @[CSR.scala:395:28]
reg reg_mstatus_spp; // @[CSR.scala:395:28]
reg reg_mstatus_mpie; // @[CSR.scala:395:28]
reg reg_mstatus_spie; // @[CSR.scala:395:28]
reg reg_mstatus_mie; // @[CSR.scala:395:28]
reg reg_mstatus_sie; // @[CSR.scala:395:28]
reg reg_dcsr_ebreakm; // @[CSR.scala:403:25]
reg reg_dcsr_ebreaks; // @[CSR.scala:403:25]
reg reg_dcsr_ebreaku; // @[CSR.scala:403:25]
reg [2:0] reg_dcsr_cause; // @[CSR.scala:403:25]
reg reg_dcsr_v; // @[CSR.scala:403:25]
reg reg_dcsr_step; // @[CSR.scala:403:25]
reg [1:0] reg_dcsr_prv; // @[CSR.scala:403:25]
reg reg_debug; // @[CSR.scala:482:26]
reg [39:0] reg_dpc; // @[CSR.scala:483:20]
reg [63:0] reg_dscratch0; // @[CSR.scala:484:26]
reg reg_singleStepped; // @[CSR.scala:486:30]
reg reg_bp_0_control_dmode; // @[CSR.scala:492:19]
reg reg_bp_0_control_action; // @[CSR.scala:492:19]
reg [1:0] reg_bp_0_control_tmatch; // @[CSR.scala:492:19]
reg reg_bp_0_control_m; // @[CSR.scala:492:19]
reg reg_bp_0_control_s; // @[CSR.scala:492:19]
reg reg_bp_0_control_u; // @[CSR.scala:492:19]
reg reg_bp_0_control_x; // @[CSR.scala:492:19]
reg reg_bp_0_control_w; // @[CSR.scala:492:19]
reg reg_bp_0_control_r; // @[CSR.scala:492:19]
reg [38:0] reg_bp_0_address; // @[CSR.scala:492:19]
reg reg_pmp_0_cfg_l; // @[CSR.scala:493:20]
reg [1:0] reg_pmp_0_cfg_a; // @[CSR.scala:493:20]
reg reg_pmp_0_cfg_x; // @[CSR.scala:493:20]
reg reg_pmp_0_cfg_w; // @[CSR.scala:493:20]
reg reg_pmp_0_cfg_r; // @[CSR.scala:493:20]
reg [29:0] reg_pmp_0_addr; // @[CSR.scala:493:20]
reg reg_pmp_1_cfg_l; // @[CSR.scala:493:20]
reg [1:0] reg_pmp_1_cfg_a; // @[CSR.scala:493:20]
reg reg_pmp_1_cfg_x; // @[CSR.scala:493:20]
reg reg_pmp_1_cfg_w; // @[CSR.scala:493:20]
reg reg_pmp_1_cfg_r; // @[CSR.scala:493:20]
reg [29:0] reg_pmp_1_addr; // @[CSR.scala:493:20]
reg reg_pmp_2_cfg_l; // @[CSR.scala:493:20]
reg [1:0] reg_pmp_2_cfg_a; // @[CSR.scala:493:20]
reg reg_pmp_2_cfg_x; // @[CSR.scala:493:20]
reg reg_pmp_2_cfg_w; // @[CSR.scala:493:20]
reg reg_pmp_2_cfg_r; // @[CSR.scala:493:20]
reg [29:0] reg_pmp_2_addr; // @[CSR.scala:493:20]
reg reg_pmp_3_cfg_l; // @[CSR.scala:493:20]
reg [1:0] reg_pmp_3_cfg_a; // @[CSR.scala:493:20]
reg reg_pmp_3_cfg_x; // @[CSR.scala:493:20]
reg reg_pmp_3_cfg_w; // @[CSR.scala:493:20]
reg reg_pmp_3_cfg_r; // @[CSR.scala:493:20]
reg [29:0] reg_pmp_3_addr; // @[CSR.scala:493:20]
reg reg_pmp_4_cfg_l; // @[CSR.scala:493:20]
reg [1:0] reg_pmp_4_cfg_a; // @[CSR.scala:493:20]
reg reg_pmp_4_cfg_x; // @[CSR.scala:493:20]
reg reg_pmp_4_cfg_w; // @[CSR.scala:493:20]
reg reg_pmp_4_cfg_r; // @[CSR.scala:493:20]
reg [29:0] reg_pmp_4_addr; // @[CSR.scala:493:20]
reg reg_pmp_5_cfg_l; // @[CSR.scala:493:20]
reg [1:0] reg_pmp_5_cfg_a; // @[CSR.scala:493:20]
reg reg_pmp_5_cfg_x; // @[CSR.scala:493:20]
reg reg_pmp_5_cfg_w; // @[CSR.scala:493:20]
reg reg_pmp_5_cfg_r; // @[CSR.scala:493:20]
reg [29:0] reg_pmp_5_addr; // @[CSR.scala:493:20]
reg reg_pmp_6_cfg_l; // @[CSR.scala:493:20]
reg [1:0] reg_pmp_6_cfg_a; // @[CSR.scala:493:20]
reg reg_pmp_6_cfg_x; // @[CSR.scala:493:20]
reg reg_pmp_6_cfg_w; // @[CSR.scala:493:20]
reg reg_pmp_6_cfg_r; // @[CSR.scala:493:20]
reg [29:0] reg_pmp_6_addr; // @[CSR.scala:493:20]
reg reg_pmp_7_cfg_l; // @[CSR.scala:493:20]
reg [1:0] reg_pmp_7_cfg_a; // @[CSR.scala:493:20]
reg reg_pmp_7_cfg_x; // @[CSR.scala:493:20]
reg reg_pmp_7_cfg_w; // @[CSR.scala:493:20]
reg reg_pmp_7_cfg_r; // @[CSR.scala:493:20]
reg [29:0] reg_pmp_7_addr; // @[CSR.scala:493:20]
reg [63:0] reg_mie; // @[CSR.scala:495:20]
reg [63:0] reg_mideleg; // @[CSR.scala:497:18]
wire [8:0] _GEN = reg_mideleg[9:1] & 9'h111; // @[CSR.scala:497:18, :498:38]
wire [15:0] _GEN_0 = {6'h0, _GEN, 1'h0}; // @[CSR.scala:498:38]
wire [63:0] _GEN_1 = {54'h0, _GEN, 1'h0}; // @[CSR.scala:498:38]
reg [63:0] reg_medeleg; // @[CSR.scala:501:18]
wire [63:0] _GEN_2 = {48'h0, reg_medeleg[15:0] & 16'hB15D}; // @[CSR.scala:501:18, :502:38]
reg reg_mip_seip; // @[CSR.scala:504:20]
reg reg_mip_stip; // @[CSR.scala:504:20]
reg reg_mip_ssip; // @[CSR.scala:504:20]
reg [39:0] reg_mepc; // @[CSR.scala:505:21]
reg [63:0] reg_mcause; // @[CSR.scala:506:27]
reg [39:0] reg_mtval; // @[CSR.scala:507:22]
reg [63:0] reg_mscratch; // @[CSR.scala:509:25]
reg [31:0] reg_mtvec; // @[CSR.scala:512:31]
reg reg_menvcfg_fiom; // @[CSR.scala:525:28]
reg reg_senvcfg_fiom; // @[CSR.scala:526:28]
reg [31:0] reg_mcounteren; // @[CSR.scala:531:18]
wire [31:0] _GEN_3 = {29'h0, reg_mcounteren[2:0]}; // @[CSR.scala:531:18, :532:32]
reg [31:0] reg_scounteren; // @[CSR.scala:535:18]
wire [31:0] _GEN_4 = {29'h0, reg_scounteren[2:0]}; // @[CSR.scala:535:18, :536:38]
reg reg_vsstatus_spp; // @[CSR.scala:562:25]
reg [39:0] reg_vsepc; // @[CSR.scala:564:22]
reg [39:0] reg_sepc; // @[CSR.scala:569:21]
reg [63:0] reg_scause; // @[CSR.scala:570:23]
reg [39:0] reg_stval; // @[CSR.scala:571:22]
reg [63:0] reg_sscratch; // @[CSR.scala:572:25]
reg [38:0] reg_stvec; // @[CSR.scala:573:22]
reg [3:0] reg_satp_mode; // @[CSR.scala:574:21]
reg [43:0] reg_satp_ppn; // @[CSR.scala:574:21]
reg reg_wfi; // @[CSR.scala:575:54]
reg [4:0] reg_fflags; // @[CSR.scala:577:23]
reg [2:0] reg_frm; // @[CSR.scala:578:20]
reg [2:0] reg_mcountinhibit; // @[CSR.scala:590:34]
reg [5:0] small_0; // @[Counters.scala:45:41]
reg [57:0] large_0; // @[Counters.scala:50:31]
wire [63:0] value = {large_0, small_0}; // @[Counters.scala:45:41, :50:31, :55:30]
reg [5:0] small_1; // @[Counters.scala:45:41]
reg [57:0] large_1; // @[Counters.scala:50:31]
wire [63:0] io_time_0 = {large_1, small_1}; // @[Counters.scala:45:41, :50:31, :55:30]
wire [15:0] read_mip = {4'h0, io_interrupts_meip, 1'h0, reg_mip_seip | io_interrupts_seip, 1'h0, io_interrupts_mtip, 1'h0, reg_mip_stip, 1'h0, io_interrupts_msip, 1'h0, reg_mip_ssip, 1'h0}; // @[CSR.scala:504:20, :606:57, :610:22]
wire [15:0] _GEN_5 = reg_mie[15:0] & read_mip; // @[CSR.scala:495:20, :610:22, :614:56]
wire [15:0] m_interrupts = ~(reg_mstatus_prv[1]) | reg_mstatus_mie ? ~(~_GEN_5 | _GEN_0) : 16'h0; // @[CSR.scala:395:28, :498:38, :614:56, :620:{25,51,62,83,85,105}]
wire [15:0] s_interrupts = reg_mstatus_v | reg_mstatus_prv == 2'h0 | reg_mstatus_prv == 2'h1 & reg_mstatus_sie ? _GEN_5 & _GEN_0 : 16'h0; // @[CSR.scala:395:28, :498:38, :614:56, :621:{25,49,68,78,98,110,151}]
wire [29:0] _GEN_6 = {reg_pmp_0_addr[28:0], reg_pmp_0_cfg_a[0]}; // @[PMP.scala:57:31, :58:23]
wire [29:0] _GEN_7 = {reg_pmp_1_addr[28:0], reg_pmp_1_cfg_a[0]}; // @[PMP.scala:57:31, :58:23]
wire [29:0] _GEN_8 = {reg_pmp_2_addr[28:0], reg_pmp_2_cfg_a[0]}; // @[PMP.scala:57:31, :58:23]
wire [29:0] _GEN_9 = {reg_pmp_3_addr[28:0], reg_pmp_3_cfg_a[0]}; // @[PMP.scala:57:31, :58:23]
wire [29:0] _GEN_10 = {reg_pmp_4_addr[28:0], reg_pmp_4_cfg_a[0]}; // @[PMP.scala:57:31, :58:23]
wire [29:0] _GEN_11 = {reg_pmp_5_addr[28:0], reg_pmp_5_cfg_a[0]}; // @[PMP.scala:57:31, :58:23]
wire [29:0] _GEN_12 = {reg_pmp_6_addr[28:0], reg_pmp_6_cfg_a[0]}; // @[PMP.scala:57:31, :58:23]
wire [29:0] _GEN_13 = {reg_pmp_7_addr[28:0], reg_pmp_7_cfg_a[0]}; // @[PMP.scala:57:31, :58:23]
reg [63:0] reg_misa; // @[CSR.scala:648:25]
wire [31:0] _read_mtvec_T_5 = reg_mtvec & {24'hFFFFFF, ~(reg_mtvec[0] ? 8'hFE : 8'h2)}; // @[package.scala:174:{35,37,41,46}]
wire [38:0] _read_stvec_T_5 = reg_stvec & {31'h7FFFFFFF, ~(reg_stvec[0] ? 8'hFE : 8'h2)}; // @[package.scala:174:{35,37,41,46}]
wire [39:0] _io_evec_T_20 = ~reg_mepc; // @[CSR.scala:505:21, :1665:28]
wire [39:0] _read_mapping_T_6 = ~{_io_evec_T_20[39:2], _io_evec_T_20[1:0] | {~(reg_misa[2]), 1'h1}}; // @[CSR.scala:648:25, :1665:{26,28,31,36,45}]
wire [39:0] _io_evec_T_10 = ~reg_dpc; // @[CSR.scala:483:20, :1665:28]
wire [39:0] _debug_csrs_T_4 = ~{_io_evec_T_10[39:2], _io_evec_T_10[1:0] | {~(reg_misa[2]), 1'h1}}; // @[CSR.scala:648:25, :1665:{26,28,31,36,45}]
wire [39:0] _io_evec_T = ~reg_sepc; // @[CSR.scala:569:21, :1665:28]
wire [39:0] _GEN_14 = ~{_io_evec_T[39:2], _io_evec_T[1:0] | {~(reg_misa[2]), 1'h1}}; // @[CSR.scala:648:25, :1665:{26,28,31,36,45}]
reg [63:0] reg_custom_0; // @[CSR.scala:798:43]
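  // One-hot CSR address decode: each and-matrix term below matches one CSR
  // address pattern of io_rw_addr, as produced by the decoded_addr PLA.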
wire [11:0] decoded_addr_decoded_decoded_invInputs = ~io_rw_addr; // @[pla.scala:78:21]
wire [10:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T = {decoded_addr_decoded_decoded_invInputs[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], decoded_addr_decoded_decoded_invInputs[6], decoded_addr_decoded_decoded_invInputs[7], decoded_addr_decoded_decoded_invInputs[8], decoded_addr_decoded_decoded_invInputs[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_1 = {decoded_addr_decoded_decoded_invInputs[0], io_rw_addr[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], decoded_addr_decoded_decoded_invInputs[6], decoded_addr_decoded_decoded_invInputs[7], decoded_addr_decoded_decoded_invInputs[8], decoded_addr_decoded_decoded_invInputs[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_2 = {io_rw_addr[0], io_rw_addr[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], decoded_addr_decoded_decoded_invInputs[6], decoded_addr_decoded_decoded_invInputs[7], decoded_addr_decoded_decoded_invInputs[8], decoded_addr_decoded_decoded_invInputs[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [9:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_3 = {decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], decoded_addr_decoded_decoded_invInputs[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], decoded_addr_decoded_decoded_invInputs[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_4 = {decoded_addr_decoded_decoded_invInputs[0], decoded_addr_decoded_decoded_invInputs[1], io_rw_addr[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], decoded_addr_decoded_decoded_invInputs[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], decoded_addr_decoded_decoded_invInputs[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_5 = {io_rw_addr[0], decoded_addr_decoded_decoded_invInputs[1], io_rw_addr[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], decoded_addr_decoded_decoded_invInputs[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], decoded_addr_decoded_decoded_invInputs[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [10:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_6 = {io_rw_addr[1], io_rw_addr[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], decoded_addr_decoded_decoded_invInputs[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], decoded_addr_decoded_decoded_invInputs[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [8:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_7 = {io_rw_addr[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], decoded_addr_decoded_decoded_invInputs[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], decoded_addr_decoded_decoded_invInputs[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_8 = {decoded_addr_decoded_decoded_invInputs[0], decoded_addr_decoded_decoded_invInputs[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], io_rw_addr[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], decoded_addr_decoded_decoded_invInputs[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_9 = {io_rw_addr[0], decoded_addr_decoded_decoded_invInputs[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], io_rw_addr[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], decoded_addr_decoded_decoded_invInputs[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_10 = {decoded_addr_decoded_decoded_invInputs[0], io_rw_addr[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], io_rw_addr[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], decoded_addr_decoded_decoded_invInputs[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_11 = {io_rw_addr[0], io_rw_addr[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], io_rw_addr[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], decoded_addr_decoded_decoded_invInputs[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [9:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_12 = {io_rw_addr[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], io_rw_addr[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], decoded_addr_decoded_decoded_invInputs[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [4:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_13 = {io_rw_addr[7], io_rw_addr[8], decoded_addr_decoded_decoded_invInputs[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_14 = {decoded_addr_decoded_decoded_invInputs[0], decoded_addr_decoded_decoded_invInputs[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], decoded_addr_decoded_decoded_invInputs[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_15 = {io_rw_addr[0], decoded_addr_decoded_decoded_invInputs[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], decoded_addr_decoded_decoded_invInputs[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_16 = {decoded_addr_decoded_decoded_invInputs[0], io_rw_addr[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], decoded_addr_decoded_decoded_invInputs[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_17 = {io_rw_addr[0], io_rw_addr[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], decoded_addr_decoded_decoded_invInputs[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_18 = {decoded_addr_decoded_decoded_invInputs[0], decoded_addr_decoded_decoded_invInputs[1], io_rw_addr[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], decoded_addr_decoded_decoded_invInputs[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_19 = {io_rw_addr[0], decoded_addr_decoded_decoded_invInputs[1], io_rw_addr[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], decoded_addr_decoded_decoded_invInputs[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [10:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_20 = {io_rw_addr[1], io_rw_addr[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], decoded_addr_decoded_decoded_invInputs[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [8:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_21 = {io_rw_addr[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], decoded_addr_decoded_decoded_invInputs[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [10:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_22 = {decoded_addr_decoded_decoded_invInputs[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], io_rw_addr[5], decoded_addr_decoded_decoded_invInputs[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_52 = {decoded_addr_decoded_decoded_invInputs[0], decoded_addr_decoded_decoded_invInputs[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], io_rw_addr[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_53 = {io_rw_addr[0], decoded_addr_decoded_decoded_invInputs[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], io_rw_addr[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_54 = {decoded_addr_decoded_decoded_invInputs[0], io_rw_addr[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], io_rw_addr[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_55 = {io_rw_addr[0], io_rw_addr[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], io_rw_addr[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [9:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_56 = {io_rw_addr[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], io_rw_addr[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [10:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_57 = {decoded_addr_decoded_decoded_invInputs[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], io_rw_addr[5], decoded_addr_decoded_decoded_invInputs[6], io_rw_addr[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_59 = {decoded_addr_decoded_decoded_invInputs[0], decoded_addr_decoded_decoded_invInputs[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], io_rw_addr[4], io_rw_addr[5], decoded_addr_decoded_decoded_invInputs[6], io_rw_addr[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_60 = {io_rw_addr[0], decoded_addr_decoded_decoded_invInputs[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], io_rw_addr[4], io_rw_addr[5], decoded_addr_decoded_decoded_invInputs[6], io_rw_addr[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_61 = {decoded_addr_decoded_decoded_invInputs[0], io_rw_addr[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], io_rw_addr[4], io_rw_addr[5], decoded_addr_decoded_decoded_invInputs[6], io_rw_addr[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_62 = {io_rw_addr[0], io_rw_addr[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], io_rw_addr[4], io_rw_addr[5], decoded_addr_decoded_decoded_invInputs[6], io_rw_addr[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_63 = {decoded_addr_decoded_decoded_invInputs[0], decoded_addr_decoded_decoded_invInputs[1], io_rw_addr[2], decoded_addr_decoded_decoded_invInputs[3], io_rw_addr[4], io_rw_addr[5], decoded_addr_decoded_decoded_invInputs[6], io_rw_addr[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_64 = {io_rw_addr[0], decoded_addr_decoded_decoded_invInputs[1], io_rw_addr[2], decoded_addr_decoded_decoded_invInputs[3], io_rw_addr[4], io_rw_addr[5], decoded_addr_decoded_decoded_invInputs[6], io_rw_addr[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_65 = {decoded_addr_decoded_decoded_invInputs[0], io_rw_addr[1], io_rw_addr[2], decoded_addr_decoded_decoded_invInputs[3], io_rw_addr[4], io_rw_addr[5], decoded_addr_decoded_decoded_invInputs[6], io_rw_addr[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_66 = {io_rw_addr[0], io_rw_addr[1], io_rw_addr[2], decoded_addr_decoded_decoded_invInputs[3], io_rw_addr[4], io_rw_addr[5], decoded_addr_decoded_decoded_invInputs[6], io_rw_addr[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_76 = {io_rw_addr[0], decoded_addr_decoded_decoded_invInputs[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], io_rw_addr[5], decoded_addr_decoded_decoded_invInputs[6], io_rw_addr[7], io_rw_addr[8], io_rw_addr[9], io_rw_addr[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_77 = {decoded_addr_decoded_decoded_invInputs[0], io_rw_addr[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], io_rw_addr[5], decoded_addr_decoded_decoded_invInputs[6], io_rw_addr[7], io_rw_addr[8], io_rw_addr[9], io_rw_addr[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_79 = {decoded_addr_decoded_decoded_invInputs[0], decoded_addr_decoded_decoded_invInputs[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], io_rw_addr[4], io_rw_addr[5], decoded_addr_decoded_decoded_invInputs[6], io_rw_addr[7], io_rw_addr[8], io_rw_addr[9], io_rw_addr[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_80 = {io_rw_addr[0], decoded_addr_decoded_decoded_invInputs[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], io_rw_addr[4], io_rw_addr[5], decoded_addr_decoded_decoded_invInputs[6], io_rw_addr[7], io_rw_addr[8], io_rw_addr[9], io_rw_addr[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [10:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_81 = {io_rw_addr[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], io_rw_addr[4], io_rw_addr[5], decoded_addr_decoded_decoded_invInputs[6], io_rw_addr[7], io_rw_addr[8], io_rw_addr[9], io_rw_addr[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [5:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_82 = {io_rw_addr[6], io_rw_addr[7], io_rw_addr[8], io_rw_addr[9], io_rw_addr[10], decoded_addr_decoded_decoded_invInputs[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [10:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_83 = {decoded_addr_decoded_decoded_invInputs[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], decoded_addr_decoded_decoded_invInputs[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], io_rw_addr[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [11:0] _decoded_addr_decoded_decoded_andMatrixOutputs_T_84 = {decoded_addr_decoded_decoded_invInputs[0], io_rw_addr[1], decoded_addr_decoded_decoded_invInputs[2], decoded_addr_decoded_decoded_invInputs[3], decoded_addr_decoded_decoded_invInputs[4], decoded_addr_decoded_decoded_invInputs[5], decoded_addr_decoded_decoded_invInputs[6], decoded_addr_decoded_decoded_invInputs[7], io_rw_addr[8], io_rw_addr[9], decoded_addr_decoded_decoded_invInputs[10], io_rw_addr[11]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [63:0] _wdata_T_2 = (io_rw_cmd[1] ? _io_rw_rdata_T_298 : 64'h0) | io_rw_wdata; // @[Mux.scala:30:73]
wire [63:0] _wdata_T_6 = ~((&(io_rw_cmd[1:0])) ? io_rw_wdata : 64'h0); // @[CSR.scala:1643:{41,45,49,55}]
wire system_insn = io_rw_cmd == 3'h4; // @[CSR.scala:876:31]
wire [11:0] _GEN_15 = ~io_rw_addr; // @[pla.scala:78:21]
wire insn_call = system_insn & (&{_GEN_15[0], _GEN_15[1], _GEN_15[2], _GEN_15[3], _GEN_15[4], _GEN_15[5], _GEN_15[6], _GEN_15[7], _GEN_15[8], _GEN_15[9], _GEN_15[10], _GEN_15[11]}); // @[pla.scala:78:21, :91:29, :98:{53,70}]
wire insn_break = system_insn & (&{io_rw_addr[0], _GEN_15[1], _GEN_15[2], _GEN_15[3], _GEN_15[4], _GEN_15[5], _GEN_15[6], _GEN_15[7], _GEN_15[8], _GEN_15[9], _GEN_15[10], _GEN_15[11]}); // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}]
wire insn_ret = system_insn & (|{&{_GEN_15[2], _GEN_15[3], _GEN_15[4], _GEN_15[5], _GEN_15[6], _GEN_15[7], io_rw_addr[8], _GEN_15[10], _GEN_15[11]}, &{io_rw_addr[10], _GEN_15[11]}}); // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}]
wire [29:0] decoded_invInputs_1 = ~(io_decode_0_inst[31:2]); // @[pla.scala:78:21]
wire [9:0] _decoded_andMatrixOutputs_T_10 = {io_decode_0_inst[22], decoded_invInputs_1[21], decoded_invInputs_1[22], decoded_invInputs_1[23], decoded_invInputs_1[24], decoded_invInputs_1[25], io_decode_0_inst[28], decoded_invInputs_1[27], decoded_invInputs_1[28], decoded_invInputs_1[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [16:0] _decoded_andMatrixOutputs_T_11 = {io_decode_0_inst[0], io_decode_0_inst[1], decoded_invInputs_1[0], decoded_invInputs_1[1], io_decode_0_inst[4], io_decode_0_inst[5], io_decode_0_inst[6], decoded_invInputs_1[5], decoded_invInputs_1[6], decoded_invInputs_1[7], io_decode_0_inst[25], decoded_invInputs_1[24], decoded_invInputs_1[25], io_decode_0_inst[28], decoded_invInputs_1[27], decoded_invInputs_1[28], decoded_invInputs_1[29]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53]
wire [1:0] _decoded_orMatrixOutputs_T_10 = {&{io_decode_0_inst[0], decoded_invInputs_1[20], decoded_invInputs_1[21], decoded_invInputs_1[22], decoded_invInputs_1[23], decoded_invInputs_1[24], decoded_invInputs_1[25], io_decode_0_inst[28], decoded_invInputs_1[28], decoded_invInputs_1[29]}, &{io_decode_0_inst[30], decoded_invInputs_1[29]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:19]
wire is_counter = io_decode_0_inst[31:20] > 12'hBFF & io_decode_0_inst[31:20] < 12'hC20 | io_decode_0_inst[31:20] > 12'hC7F & io_decode_0_inst[31:20] < 12'hCA0; // @[package.scala:213:{47,55,60}]
wire allow_sfence_vma = reg_mstatus_prv[1] | ~(~reg_mstatus_v & reg_mstatus_tvm); // @[CSR.scala:395:28, :906:61, :907:{70,73,77}]
wire [31:0] _GEN_16 = {27'h0, io_decode_0_inst[24:20]}; // @[CSR.scala:897:27, :911:28, :912:70]
wire [31:0] _io_decode_0_virtual_access_illegal_T_3 = _GEN_3 >> _GEN_16; // @[CSR.scala:532:32, :912:70]
wire [31:0] _io_decode_0_virtual_access_illegal_T_11 = _GEN_4 >> _GEN_16; // @[CSR.scala:536:38, :912:70, :913:75]
wire [31:0] _io_decode_0_virtual_access_illegal_T_6 = 32'h0 >> _GEN_16; // @[CSR.scala:912:70, :914:63]
wire io_decode_0_fp_illegal_0 = reg_mstatus_fs == 2'h0 | reg_mstatus_v | ~(reg_misa[5]); // @[CSR.scala:395:28, :648:25, :915:{39,47,91,94,103}]
wire [3:0] io_decode_0_fp_csr_invInputs = ~(io_decode_0_inst[31:28]); // @[pla.scala:78:21]
wire [3:0] _io_decode_0_fp_csr_andMatrixOutputs_T = {io_decode_0_fp_csr_invInputs[0], io_decode_0_fp_csr_invInputs[1], io_decode_0_fp_csr_invInputs[2], io_decode_0_fp_csr_invInputs[3]}; // @[pla.scala:78:21, :91:29, :98:53]
wire _io_decode_0_virtual_access_illegal_T_24 = io_decode_0_inst[31:20] == 12'h180; // @[CSR.scala:897:27, :899:93]
wire csr_exists =
io_decode_0_inst[31:20] == 12'h7A0 | io_decode_0_inst[31:20] == 12'h7A1 | io_decode_0_inst[31:20] == 12'h7A2 | io_decode_0_inst[31:20] == 12'h7A3 | io_decode_0_inst[31:20] == 12'h301 | io_decode_0_inst[31:20] == 12'h300 | io_decode_0_inst[31:20] == 12'h305 | io_decode_0_inst[31:20] == 12'h344 | io_decode_0_inst[31:20] == 12'h304 | io_decode_0_inst[31:20] == 12'h340 | io_decode_0_inst[31:20] == 12'h341 | io_decode_0_inst[31:20] == 12'h343 | io_decode_0_inst[31:20] == 12'h342 | io_decode_0_inst[31:20] == 12'hF14 | io_decode_0_inst[31:20] == 12'h7B0 | io_decode_0_inst[31:20] == 12'h7B1 | io_decode_0_inst[31:20] == 12'h7B2 | io_decode_0_inst[31:20] == 12'h1 | io_decode_0_inst[31:20] == 12'h2 | io_decode_0_inst[31:20] == 12'h3 | io_decode_0_inst[31:20] == 12'h320 | io_decode_0_inst[31:20] == 12'hB00 | io_decode_0_inst[31:20] == 12'hB02 | io_decode_0_inst[31:20] == 12'h323 | io_decode_0_inst[31:20] == 12'hB03 | io_decode_0_inst[31:20] == 12'hC03 | io_decode_0_inst[31:20] == 12'h324 | io_decode_0_inst[31:20] == 12'hB04 | io_decode_0_inst[31:20] == 12'hC04 | io_decode_0_inst[31:20] == 12'h325 | io_decode_0_inst[31:20] == 12'hB05 | io_decode_0_inst[31:20] == 12'hC05 | io_decode_0_inst[31:20] == 12'h326 | io_decode_0_inst[31:20] == 12'hB06 | io_decode_0_inst[31:20] == 12'hC06 | io_decode_0_inst[31:20] == 12'h327 | io_decode_0_inst[31:20] == 12'hB07 | io_decode_0_inst[31:20] == 12'hC07 | io_decode_0_inst[31:20] == 12'h328 | io_decode_0_inst[31:20] == 12'hB08 | io_decode_0_inst[31:20] == 12'hC08 | io_decode_0_inst[31:20] == 12'h329 | io_decode_0_inst[31:20] == 12'hB09 | io_decode_0_inst[31:20] == 12'hC09 | io_decode_0_inst[31:20] == 12'h32A | io_decode_0_inst[31:20] == 12'hB0A | io_decode_0_inst[31:20] == 12'hC0A | io_decode_0_inst[31:20] == 12'h32B | io_decode_0_inst[31:20] == 12'hB0B | io_decode_0_inst[31:20] == 12'hC0B | io_decode_0_inst[31:20] == 12'h32C | io_decode_0_inst[31:20] == 12'hB0C | io_decode_0_inst[31:20] == 12'hC0C | io_decode_0_inst[31:20] == 12'h32D | io_decode_0_inst[31:20] == 12'hB0D
| io_decode_0_inst[31:20] == 12'hC0D | io_decode_0_inst[31:20] == 12'h32E | io_decode_0_inst[31:20] == 12'hB0E | io_decode_0_inst[31:20] == 12'hC0E | io_decode_0_inst[31:20] == 12'h32F | io_decode_0_inst[31:20] == 12'hB0F | io_decode_0_inst[31:20] == 12'hC0F | io_decode_0_inst[31:20] == 12'h330 | io_decode_0_inst[31:20] == 12'hB10 | io_decode_0_inst[31:20] == 12'hC10 | io_decode_0_inst[31:20] == 12'h331 | io_decode_0_inst[31:20] == 12'hB11 | io_decode_0_inst[31:20] == 12'hC11 | io_decode_0_inst[31:20] == 12'h332 | io_decode_0_inst[31:20] == 12'hB12 | io_decode_0_inst[31:20] == 12'hC12 | io_decode_0_inst[31:20] == 12'h333 | io_decode_0_inst[31:20] == 12'hB13 | io_decode_0_inst[31:20] == 12'hC13 | io_decode_0_inst[31:20] == 12'h334 | io_decode_0_inst[31:20] == 12'hB14 | io_decode_0_inst[31:20] == 12'hC14 | io_decode_0_inst[31:20] == 12'h335 | io_decode_0_inst[31:20] == 12'hB15 | io_decode_0_inst[31:20] == 12'hC15 | io_decode_0_inst[31:20] == 12'h336 | io_decode_0_inst[31:20] == 12'hB16 | io_decode_0_inst[31:20] == 12'hC16 | io_decode_0_inst[31:20] == 12'h337 | io_decode_0_inst[31:20] == 12'hB17 | io_decode_0_inst[31:20] == 12'hC17 | io_decode_0_inst[31:20] == 12'h338 | io_decode_0_inst[31:20] == 12'hB18 | io_decode_0_inst[31:20] == 12'hC18 | io_decode_0_inst[31:20] == 12'h339 | io_decode_0_inst[31:20] == 12'hB19 | io_decode_0_inst[31:20] == 12'hC19 | io_decode_0_inst[31:20] == 12'h33A | io_decode_0_inst[31:20] == 12'hB1A | io_decode_0_inst[31:20] == 12'hC1A | io_decode_0_inst[31:20] == 12'h33B | io_decode_0_inst[31:20] == 12'hB1B | io_decode_0_inst[31:20] == 12'hC1B | io_decode_0_inst[31:20] == 12'h33C | io_decode_0_inst[31:20] == 12'hB1C | io_decode_0_inst[31:20] == 12'hC1C | io_decode_0_inst[31:20] == 12'h33D | io_decode_0_inst[31:20] == 12'hB1D | io_decode_0_inst[31:20] == 12'hC1D | io_decode_0_inst[31:20] == 12'h33E | io_decode_0_inst[31:20] == 12'hB1E | io_decode_0_inst[31:20] == 12'hC1E | io_decode_0_inst[31:20] == 12'h33F | io_decode_0_inst[31:20] == 12'hB1F | io_decode_0_inst[31:20] == 12'hC1F
| io_decode_0_inst[31:20] == 12'h306 | io_decode_0_inst[31:20] == 12'hC00 | io_decode_0_inst[31:20] == 12'hC02 | io_decode_0_inst[31:20] == 12'h30A | io_decode_0_inst[31:20] == 12'h100 | io_decode_0_inst[31:20] == 12'h144 | io_decode_0_inst[31:20] == 12'h104 | io_decode_0_inst[31:20] == 12'h140 | io_decode_0_inst[31:20] == 12'h142 | io_decode_0_inst[31:20] == 12'h143 | _io_decode_0_virtual_access_illegal_T_24 | io_decode_0_inst[31:20] == 12'h141 | io_decode_0_inst[31:20] == 12'h105 | io_decode_0_inst[31:20] == 12'h106 | io_decode_0_inst[31:20] == 12'h303 | io_decode_0_inst[31:20] == 12'h302 | io_decode_0_inst[31:20] == 12'h10A | io_decode_0_inst[31:20] == 12'h3A0 | io_decode_0_inst[31:20] == 12'h3A2 | io_decode_0_inst[31:20] == 12'h3B0 | io_decode_0_inst[31:20] == 12'h3B1 | io_decode_0_inst[31:20] == 12'h3B2 | io_decode_0_inst[31:20] == 12'h3B3 | io_decode_0_inst[31:20] == 12'h3B4 | io_decode_0_inst[31:20] == 12'h3B5 | io_decode_0_inst[31:20] == 12'h3B6 | io_decode_0_inst[31:20] == 12'h3B7 | io_decode_0_inst[31:20] == 12'h3B8 | io_decode_0_inst[31:20] == 12'h3B9 | io_decode_0_inst[31:20] == 12'h3BA | io_decode_0_inst[31:20] == 12'h3BB | io_decode_0_inst[31:20] == 12'h3BC | io_decode_0_inst[31:20] == 12'h3BD | io_decode_0_inst[31:20] == 12'h3BE | io_decode_0_inst[31:20] == 12'h3BF | io_decode_0_inst[31:20] == 12'h7C1 | io_decode_0_inst[31:20] == 12'hF12 | io_decode_0_inst[31:20] == 12'hF11 | io_decode_0_inst[31:20] == 12'hF13 | io_decode_0_inst[31:20] == 12'hF15; // @[CSR.scala:897:27, :899:{93,111}]
wire _io_decode_0_system_illegal_T = reg_mstatus_prv < io_decode_0_inst[29:28]; // @[CSR.scala:190:36, :395:28, :897:27, :920:42, :923:28]
wire [5:0] io_decode_0_read_illegal_invInputs = ~(io_decode_0_inst[31:26]); // @[pla.scala:78:21]
wire [11:0] io_decode_0_write_flush_addr_m = {io_decode_0_inst[31:30], io_decode_0_inst[29:20] | 10'h300}; // @[CSR.scala:897:27, :932:25]
wire [63:0] cause = insn_call ? {60'h0, {2'h0, reg_mstatus_prv[0] & reg_mstatus_v ? 2'h2 : reg_mstatus_prv} - 4'h8} : insn_break ? 64'h3 : io_cause; // @[CSR.scala:395:28, :893:83, :945:105, :959:{8,40,45,65}, :960:14]
wire _causeIsDebugTrigger_T_2 = cause[7:0] == 8'hE; // @[CSR.scala:959:8, :961:25, :963:53]
wire causeIsDebugInt = cause[63] & _causeIsDebugTrigger_T_2; // @[CSR.scala:959:8, :963:{30,39,53}]
wire causeIsDebugTrigger = ~(cause[63]) & _causeIsDebugTrigger_T_2; // @[CSR.scala:959:8, :963:{30,53}, :964:{29,44}]
wire [3:0] _causeIsDebugBreak_T_4 = {reg_dcsr_ebreakm, 1'h0, reg_dcsr_ebreaks, reg_dcsr_ebreaku} >> reg_mstatus_prv; // @[CSR.scala:395:28, :403:25, :965:{62,134}]
wire trapToDebug = reg_singleStepped | causeIsDebugInt | causeIsDebugTrigger | ~(cause[63]) & insn_break & _causeIsDebugBreak_T_4[0] | reg_debug; // @[CSR.scala:482:26, :486:30, :893:83, :959:8, :963:{30,39}, :964:44, :965:{27,42,56,134}, :966:{56,75,98,119}]
wire [63:0] _GEN_17 = {58'h0, cause[5:0]}; // @[CSR.scala:959:8, :962:31, :970:100]
wire [63:0] _delegate_T_3 = _GEN_1 >> _GEN_17; // @[CSR.scala:498:38, :970:100]
wire [63:0] _delegate_T_5 = _GEN_2 >> _GEN_17; // @[CSR.scala:502:38, :970:{100,132}]
wire delegate = ~(reg_mstatus_prv[1]) & (cause[63] ? _delegate_T_3[0] : _delegate_T_5[0]); // @[CSR.scala:395:28, :620:51, :959:8, :963:30, :970:{66,72,100,132}]
wire [63:0] _delegateVS_T_2 = 64'h0 >> _GEN_17; // @[CSR.scala:970:100, :971:80]
wire [63:0] _delegateVS_T_4 = 64'h0 >> _GEN_17; // @[CSR.scala:970:100, :971:112]
wire delegateVS = reg_mstatus_v & delegate & (cause[63] ? _delegateVS_T_2[0] : _delegateVS_T_4[0]); // @[CSR.scala:395:28, :959:8, :963:30, :970:66, :971:{34,46,52,80,112}]
wire [39:0] notDebugTVec_base = delegate ? (delegateVS ? 40'h0 : {reg_stvec[38], _read_stvec_T_5}) : {8'h0, _read_mtvec_T_5}; // @[package.scala:132:{15,38}, :138:15, :174:35]
wire _exception_T = insn_call | insn_break; // @[CSR.scala:893:83, :1000:24]
assign io_singleStep_0 = reg_dcsr_step & ~reg_debug; // @[CSR.scala:403:25, :482:26, :927:45, :1001:34]
wire exception = _exception_T | io_exception; // @[CSR.scala:1000:24, :1020:43] |
Generate the Verilog code corresponding to the following Chisel files.
File MSHR.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import freechips.rocketchip.tilelink._
import TLPermissions._
import TLMessages._
import MetaData._
import chisel3.PrintableHelper
import chisel3.experimental.dataview._
class ScheduleRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val a = Valid(new SourceARequest(params))
val b = Valid(new SourceBRequest(params))
val c = Valid(new SourceCRequest(params))
val d = Valid(new SourceDRequest(params))
val e = Valid(new SourceERequest(params))
val x = Valid(new SourceXRequest(params))
val dir = Valid(new DirectoryWrite(params))
val reload = Bool() // get next request via allocate (if any)
}
class MSHRStatus(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val way = UInt(params.wayBits.W)
val blockB = Bool()
val nestB = Bool()
val blockC = Bool()
val nestC = Bool()
}
class NestedWriteback(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
val b_toN = Bool() // nested Probes may unhit us
val b_toB = Bool() // nested Probes may demote us
val b_clr_dirty = Bool() // nested Probes clear dirty
val c_set_dirty = Bool() // nested Releases MAY set dirty
}
sealed trait CacheState
{
val code = CacheState.index.U
CacheState.index = CacheState.index + 1
}
object CacheState
{
var index = 0
}
case object S_INVALID extends CacheState
case object S_BRANCH extends CacheState
case object S_BRANCH_C extends CacheState
case object S_TIP extends CacheState
case object S_TIP_C extends CacheState
case object S_TIP_CD extends CacheState
case object S_TIP_D extends CacheState
case object S_TRUNK_C extends CacheState
case object S_TRUNK_CD extends CacheState
class MSHR(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val allocate = Flipped(Valid(new AllocateRequest(params))) // refills MSHR for next cycle
val directory = Flipped(Valid(new DirectoryResult(params))) // triggers schedule setup
val status = Valid(new MSHRStatus(params))
val schedule = Decoupled(new ScheduleRequest(params))
val sinkc = Flipped(Valid(new SinkCResponse(params)))
val sinkd = Flipped(Valid(new SinkDResponse(params)))
val sinke = Flipped(Valid(new SinkEResponse(params)))
val nestedwb = Flipped(new NestedWriteback(params))
})
val request_valid = RegInit(false.B)
val request = Reg(new FullRequest(params))
val meta_valid = RegInit(false.B)
val meta = Reg(new DirectoryResult(params))
// Define which states are valid
when (meta_valid) {
when (meta.state === INVALID) {
assert (!meta.clients.orR)
assert (!meta.dirty)
}
when (meta.state === BRANCH) {
assert (!meta.dirty)
}
when (meta.state === TRUNK) {
assert (meta.clients.orR)
assert ((meta.clients & (meta.clients - 1.U)) === 0.U) // at most one
}
when (meta.state === TIP) {
// noop
}
}
// Completed transitions (s_ = scheduled), (w_ = waiting)
val s_rprobe = RegInit(true.B) // B
val w_rprobeackfirst = RegInit(true.B)
val w_rprobeacklast = RegInit(true.B)
val s_release = RegInit(true.B) // CW w_rprobeackfirst
val w_releaseack = RegInit(true.B)
val s_pprobe = RegInit(true.B) // B
val s_acquire = RegInit(true.B) // A s_release, s_pprobe [1]
val s_flush = RegInit(true.B) // X w_releaseack
val w_grantfirst = RegInit(true.B)
val w_grantlast = RegInit(true.B)
val w_grant = RegInit(true.B) // first | last depending on wormhole
val w_pprobeackfirst = RegInit(true.B)
val w_pprobeacklast = RegInit(true.B)
val w_pprobeack = RegInit(true.B) // first | last depending on wormhole
val s_probeack = RegInit(true.B) // C w_pprobeackfirst (mutually exclusive with next two s_*)
val s_grantack = RegInit(true.B) // E w_grantfirst ... CAN require both outE&inD to service outD
val s_execute = RegInit(true.B) // D w_pprobeack, w_grant
val w_grantack = RegInit(true.B)
val s_writeback = RegInit(true.B) // W w_*
// [1]: We cannot issue outer Acquire while holding blockB (=> outA can stall)
// However, inB and outC are higher priority than outB, so s_release and s_pprobe
// may be safely issued while blockB. Thus we must NOT try to schedule the
// potentially stuck s_acquire with either of them (scheduler is all or none).
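// For example, an A-channel request that misses (or must upgrade a BRANCH copy) clears
// s_acquire, w_grantfirst, w_grantlast, w_grant, s_grantack and s_writeback in the allocation
// logic at the end of this file: the scheduler must issue the outer Acquire, collect every Grant
// beat, send the outer GrantAck and write the directory back before this MSHR can retire.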
// Meta-data that we discover underway
val sink = Reg(UInt(params.outer.bundle.sinkBits.W))
val gotT = Reg(Bool())
val bad_grant = Reg(Bool())
val probes_done = Reg(UInt(params.clientBits.W))
val probes_toN = Reg(UInt(params.clientBits.W))
val probes_noT = Reg(Bool())
// When a nested transaction completes, update our meta data
when (meta_valid && meta.state =/= INVALID &&
io.nestedwb.set === request.set && io.nestedwb.tag === meta.tag) {
when (io.nestedwb.b_clr_dirty) { meta.dirty := false.B }
when (io.nestedwb.c_set_dirty) { meta.dirty := true.B }
when (io.nestedwb.b_toB) { meta.state := BRANCH }
when (io.nestedwb.b_toN) { meta.hit := false.B }
}
// Scheduler status
io.status.valid := request_valid
io.status.bits.set := request.set
io.status.bits.tag := request.tag
io.status.bits.way := meta.way
io.status.bits.blockB := !meta_valid || ((!w_releaseack || !w_rprobeacklast || !w_pprobeacklast) && !w_grantfirst)
io.status.bits.nestB := meta_valid && w_releaseack && w_rprobeacklast && w_pprobeacklast && !w_grantfirst
// The above rules ensure we will block and not nest an outer probe while still doing our
// own inner probes. Thus every probe wakes exactly one MSHR.
io.status.bits.blockC := !meta_valid
io.status.bits.nestC := meta_valid && (!w_rprobeackfirst || !w_pprobeackfirst || !w_grantfirst)
// The w_grantfirst in nestC is necessary to deal with:
// acquire waiting for grant, inner release gets queued, outer probe -> inner probe -> deadlock
// ... this is possible because the release+probe can be for same set, but different tag
// We can only demand: block, nest, or queue
assert (!io.status.bits.nestB || !io.status.bits.blockB)
assert (!io.status.bits.nestC || !io.status.bits.blockC)
// Scheduler requests
val no_wait = w_rprobeacklast && w_releaseack && w_grantlast && w_pprobeacklast && w_grantack
io.schedule.bits.a.valid := !s_acquire && s_release && s_pprobe
io.schedule.bits.b.valid := !s_rprobe || !s_pprobe
io.schedule.bits.c.valid := (!s_release && w_rprobeackfirst) || (!s_probeack && w_pprobeackfirst)
io.schedule.bits.d.valid := !s_execute && w_pprobeack && w_grant
io.schedule.bits.e.valid := !s_grantack && w_grantfirst
io.schedule.bits.x.valid := !s_flush && w_releaseack
io.schedule.bits.dir.valid := (!s_release && w_rprobeackfirst) || (!s_writeback && no_wait)
io.schedule.bits.reload := no_wait
io.schedule.valid := io.schedule.bits.a.valid || io.schedule.bits.b.valid || io.schedule.bits.c.valid ||
io.schedule.bits.d.valid || io.schedule.bits.e.valid || io.schedule.bits.x.valid ||
io.schedule.bits.dir.valid
// Schedule completions
when (io.schedule.ready) {
s_rprobe := true.B
when (w_rprobeackfirst) { s_release := true.B }
s_pprobe := true.B
when (s_release && s_pprobe) { s_acquire := true.B }
when (w_releaseack) { s_flush := true.B }
when (w_pprobeackfirst) { s_probeack := true.B }
when (w_grantfirst) { s_grantack := true.B }
when (w_pprobeack && w_grant) { s_execute := true.B }
when (no_wait) { s_writeback := true.B }
// Await the next operation
when (no_wait) {
request_valid := false.B
meta_valid := false.B
}
}
// Resulting meta-data
val final_meta_writeback = WireInit(meta)
val req_clientBit = params.clientBit(request.source)
val req_needT = needT(request.opcode, request.param)
val req_acquire = request.opcode === AcquireBlock || request.opcode === AcquirePerm
val meta_no_clients = !meta.clients.orR
val req_promoteT = req_acquire && Mux(meta.hit, meta_no_clients && meta.state === TIP, gotT)
when (request.prio(2) && (!params.firstLevel).B) { // always a hit
final_meta_writeback.dirty := meta.dirty || request.opcode(0)
final_meta_writeback.state := Mux(request.param =/= TtoT && meta.state === TRUNK, TIP, meta.state)
final_meta_writeback.clients := meta.clients & ~Mux(isToN(request.param), req_clientBit, 0.U)
final_meta_writeback.hit := true.B // chained requests are hits
} .elsewhen (request.control && params.control.B) { // request.prio(0)
when (meta.hit) {
final_meta_writeback.dirty := false.B
final_meta_writeback.state := INVALID
final_meta_writeback.clients := meta.clients & ~probes_toN
}
final_meta_writeback.hit := false.B
} .otherwise {
final_meta_writeback.dirty := (meta.hit && meta.dirty) || !request.opcode(2)
final_meta_writeback.state := Mux(req_needT,
Mux(req_acquire, TRUNK, TIP),
Mux(!meta.hit, Mux(gotT, Mux(req_acquire, TRUNK, TIP), BRANCH),
MuxLookup(meta.state, 0.U(2.W))(Seq(
INVALID -> BRANCH,
BRANCH -> BRANCH,
TRUNK -> TIP,
TIP -> Mux(meta_no_clients && req_acquire, TRUNK, TIP)))))
final_meta_writeback.clients := Mux(meta.hit, meta.clients & ~probes_toN, 0.U) |
Mux(req_acquire, req_clientBit, 0.U)
final_meta_writeback.tag := request.tag
final_meta_writeback.hit := true.B
}
when (bad_grant) {
when (meta.hit) {
// upgrade failed (B -> T)
assert (!meta_valid || meta.state === BRANCH)
final_meta_writeback.hit := true.B
final_meta_writeback.dirty := false.B
final_meta_writeback.state := BRANCH
final_meta_writeback.clients := meta.clients & ~probes_toN
} .otherwise {
// failed N -> (T or B)
final_meta_writeback.hit := false.B
final_meta_writeback.dirty := false.B
final_meta_writeback.state := INVALID
final_meta_writeback.clients := 0.U
}
}
val invalid = Wire(new DirectoryEntry(params))
invalid.dirty := false.B
invalid.state := INVALID
invalid.clients := 0.U
invalid.tag := 0.U
// Just because a client says BtoT, by the time we process the request it may have dropped to N.
// Therefore, we must consult our own meta-data state to confirm it still owns the line.
val honour_BtoT = meta.hit && (meta.clients & req_clientBit).orR
// The client asking us to act is proof they don't have permissions.
val excluded_client = Mux(meta.hit && request.prio(0) && skipProbeN(request.opcode, params.cache.hintsSkipProbe), req_clientBit, 0.U)
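// For example, a hit Acquire or Get never probes the requesting client itself: excluded_client
// masks its bit out of the probe set used for io.schedule.bits.b below.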
io.schedule.bits.a.bits.tag := request.tag
io.schedule.bits.a.bits.set := request.set
io.schedule.bits.a.bits.param := Mux(req_needT, Mux(meta.hit, BtoT, NtoT), NtoB)
io.schedule.bits.a.bits.block := request.size =/= log2Ceil(params.cache.blockBytes).U ||
!(request.opcode === PutFullData || request.opcode === AcquirePerm)
io.schedule.bits.a.bits.source := 0.U
io.schedule.bits.b.bits.param := Mux(!s_rprobe, toN, Mux(request.prio(1), request.param, Mux(req_needT, toN, toB)))
io.schedule.bits.b.bits.tag := Mux(!s_rprobe, meta.tag, request.tag)
io.schedule.bits.b.bits.set := request.set
io.schedule.bits.b.bits.clients := meta.clients & ~excluded_client
io.schedule.bits.c.bits.opcode := Mux(meta.dirty, ReleaseData, Release)
io.schedule.bits.c.bits.param := Mux(meta.state === BRANCH, BtoN, TtoN)
io.schedule.bits.c.bits.source := 0.U
io.schedule.bits.c.bits.tag := meta.tag
io.schedule.bits.c.bits.set := request.set
io.schedule.bits.c.bits.way := meta.way
io.schedule.bits.c.bits.dirty := meta.dirty
io.schedule.bits.d.bits.viewAsSupertype(chiselTypeOf(request)) := request
io.schedule.bits.d.bits.param := Mux(!req_acquire, request.param,
MuxLookup(request.param, request.param)(Seq(
NtoB -> Mux(req_promoteT, NtoT, NtoB),
BtoT -> Mux(honour_BtoT, BtoT, NtoT),
NtoT -> NtoT)))
io.schedule.bits.d.bits.sink := 0.U
io.schedule.bits.d.bits.way := meta.way
io.schedule.bits.d.bits.bad := bad_grant
io.schedule.bits.e.bits.sink := sink
io.schedule.bits.x.bits.fail := false.B
io.schedule.bits.dir.bits.set := request.set
io.schedule.bits.dir.bits.way := meta.way
io.schedule.bits.dir.bits.data := Mux(!s_release, invalid, WireInit(new DirectoryEntry(params), init = final_meta_writeback))
// Coverage of state transitions
def cacheState(entry: DirectoryEntry, hit: Bool) = {
val out = WireDefault(0.U)
val c = entry.clients.orR
val d = entry.dirty
switch (entry.state) {
is (BRANCH) { out := Mux(c, S_BRANCH_C.code, S_BRANCH.code) }
is (TRUNK) { out := Mux(d, S_TRUNK_CD.code, S_TRUNK_C.code) }
is (TIP) { out := Mux(c, Mux(d, S_TIP_CD.code, S_TIP_C.code), Mux(d, S_TIP_D.code, S_TIP.code)) }
is (INVALID) { out := S_INVALID.code }
}
when (!hit) { out := S_INVALID.code }
out
}
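// For example, a dirty TIP entry with at least one client maps to S_TIP_CD, and any call with
// hit = false reports S_INVALID regardless of the entry's contents.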
val p = !params.lastLevel // can be probed
val c = !params.firstLevel // can be acquired
val m = params.inner.client.clients.exists(!_.supports.probe) // can be written (or read)
val r = params.outer.manager.managers.exists(!_.alwaysGrantsT) // read-only devices exist
val f = params.control // flush control register exists
val cfg = (p, c, m, r, f)
val b = r || p // can reach branch state (via probe downgrade or read-only device)
// The cache must be used for something or we would not be here
require(c || m)
val evict = cacheState(meta, !meta.hit)
val before = cacheState(meta, meta.hit)
val after = cacheState(final_meta_writeback, true.B)
def eviction(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo): Unit = {
if (cover) {
params.ccover(evict === from.code, s"MSHR_${from}_EVICT", s"State transition from ${from} to evicted ${cfg}")
} else {
assert(!(evict === from.code), cf"State transition from ${from} to evicted should be impossible ${cfg}")
}
if (cover && f) {
params.ccover(before === from.code, s"MSHR_${from}_FLUSH", s"State transition from ${from} to flushed ${cfg}")
} else {
assert(!(before === from.code), cf"State transition from ${from} to flushed should be impossible ${cfg}")
}
}
def transition(from: CacheState, to: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo): Unit = {
if (cover) {
params.ccover(before === from.code && after === to.code, s"MSHR_${from}_${to}", s"State transition from ${from} to ${to} ${cfg}")
} else {
assert(!(before === from.code && after === to.code), cf"State transition from ${from} to ${to} should be impossible ${cfg}")
}
}
when ((!s_release && w_rprobeackfirst) && io.schedule.ready) {
eviction(S_BRANCH, b) // MMIO read to read-only device
eviction(S_BRANCH_C, b && c) // you need children to become C
eviction(S_TIP, true) // MMIO read || clean release can lead to this state
eviction(S_TIP_C, c) // needs two clients || client + mmio || downgrading client
eviction(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client
eviction(S_TIP_D, true) // MMIO write || dirty release lead here
eviction(S_TRUNK_C, c) // acquire for write
eviction(S_TRUNK_CD, c) // dirty release then reacquire
}
when ((!s_writeback && no_wait) && io.schedule.ready) {
transition(S_INVALID, S_BRANCH, b && m) // only MMIO can bring us to BRANCH state
transition(S_INVALID, S_BRANCH_C, b && c) // C state is only possible if there are inner caches
transition(S_INVALID, S_TIP, m) // MMIO read
transition(S_INVALID, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_INVALID, S_TIP_CD, false) // acquire does not cause dirty immediately
transition(S_INVALID, S_TIP_D, m) // MMIO write
transition(S_INVALID, S_TRUNK_C, c) // acquire
transition(S_INVALID, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH, S_INVALID, b && p) // probe can do this (flushes run as evictions)
transition(S_BRANCH, S_BRANCH_C, b && c) // acquire
transition(S_BRANCH, S_TIP, b && m) // prefetch write
transition(S_BRANCH, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_BRANCH, S_TIP_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH, S_TIP_D, b && m) // MMIO write
transition(S_BRANCH, S_TRUNK_C, b && c) // acquire
transition(S_BRANCH, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_BRANCH_C, S_INVALID, b && c && p)
transition(S_BRANCH_C, S_BRANCH, b && c) // clean release (optional)
transition(S_BRANCH_C, S_TIP, b && c && m) // prefetch write
transition(S_BRANCH_C, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_BRANCH_C, S_TIP_D, b && c && m) // MMIO write
transition(S_BRANCH_C, S_TIP_CD, false) // going dirty means we must shoot down clients
transition(S_BRANCH_C, S_TRUNK_C, b && c) // acquire
transition(S_BRANCH_C, S_TRUNK_CD, false) // acquire does not cause dirty immediately
transition(S_TIP, S_INVALID, p)
transition(S_TIP, S_BRANCH, p) // losing TIP only possible via probe
transition(S_TIP, S_BRANCH_C, false) // we would go S_TRUNK_C instead
transition(S_TIP, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP, S_TIP_D, m) // direct dirty only via MMIO write
transition(S_TIP, S_TIP_CD, false) // acquire does not make us dirty immediately
transition(S_TIP, S_TRUNK_C, c) // acquire
transition(S_TIP, S_TRUNK_CD, false) // acquire does not make us dirty immediately
transition(S_TIP_C, S_INVALID, c && p)
transition(S_TIP_C, S_BRANCH, c && p) // losing TIP only possible via probe
transition(S_TIP_C, S_BRANCH_C, c && p) // losing TIP only possible via probe
transition(S_TIP_C, S_TIP, c) // probed while MMIO read || clean release (optional)
transition(S_TIP_C, S_TIP_D, c && m) // direct dirty only via MMIO write
transition(S_TIP_C, S_TIP_CD, false) // going dirty means we must shoot down clients
transition(S_TIP_C, S_TRUNK_C, c) // acquire
transition(S_TIP_C, S_TRUNK_CD, false) // acquire does not make us immediately dirty
transition(S_TIP_D, S_INVALID, p)
transition(S_TIP_D, S_BRANCH, p) // losing D is only possible via probe
transition(S_TIP_D, S_BRANCH_C, p && c) // probed while acquire shared
transition(S_TIP_D, S_TIP, p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TIP_D, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP_D, S_TIP_CD, false) // we would go S_TRUNK_CD instead
transition(S_TIP_D, S_TRUNK_C, p && c) // probed while acquired
transition(S_TIP_D, S_TRUNK_CD, c) // acquire
transition(S_TIP_CD, S_INVALID, c && p)
transition(S_TIP_CD, S_BRANCH, c && p) // losing D is only possible via probe
transition(S_TIP_CD, S_BRANCH_C, c && p) // losing D is only possible via probe
transition(S_TIP_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TIP_CD, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TIP_CD, S_TIP_D, c) // MMIO write || clean release (optional)
transition(S_TIP_CD, S_TRUNK_C, c && p) // probed while acquire
transition(S_TIP_CD, S_TRUNK_CD, c) // acquire
transition(S_TRUNK_C, S_INVALID, c && p)
transition(S_TRUNK_C, S_BRANCH, c && p) // losing TIP only possible via probe
transition(S_TRUNK_C, S_BRANCH_C, c && p) // losing TIP only possible via probe
transition(S_TRUNK_C, S_TIP, c) // MMIO read || clean release (optional)
transition(S_TRUNK_C, S_TIP_C, c) // bounce shared
transition(S_TRUNK_C, S_TIP_D, c) // dirty release
transition(S_TRUNK_C, S_TIP_CD, c) // dirty bounce shared
transition(S_TRUNK_C, S_TRUNK_CD, c) // dirty bounce
transition(S_TRUNK_CD, S_INVALID, c && p)
transition(S_TRUNK_CD, S_BRANCH, c && p) // losing D only possible via probe
transition(S_TRUNK_CD, S_BRANCH_C, c && p) // losing D only possible via probe
transition(S_TRUNK_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional)
transition(S_TRUNK_CD, S_TIP_C, false) // we would go S_TRUNK_C instead
transition(S_TRUNK_CD, S_TIP_D, c) // dirty release
transition(S_TRUNK_CD, S_TIP_CD, c) // bounce shared
transition(S_TRUNK_CD, S_TRUNK_C, c && p) // probed while acquire
}
// Handle response messages
val probe_bit = params.clientBit(io.sinkc.bits.source)
val last_probe = (probes_done | probe_bit) === (meta.clients & ~excluded_client)
val probe_toN = isToN(io.sinkc.bits.param)
if (!params.firstLevel) when (io.sinkc.valid) {
params.ccover( probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_FULL", "Client downgraded to N when asked only to do B")
params.ccover(!probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_HALF", "Client downgraded to B when asked only to do B")
// Caution: the probe matches us only in set.
// We would never allow an outer probe to nest until both w_[rp]probeack complete, so
// it is safe to just unguardedly update the probe FSM.
probes_done := probes_done | probe_bit
probes_toN := probes_toN | Mux(probe_toN, probe_bit, 0.U)
probes_noT := probes_noT || io.sinkc.bits.param =/= TtoT
w_rprobeackfirst := w_rprobeackfirst || last_probe
w_rprobeacklast := w_rprobeacklast || (last_probe && io.sinkc.bits.last)
w_pprobeackfirst := w_pprobeackfirst || last_probe
w_pprobeacklast := w_pprobeacklast || (last_probe && io.sinkc.bits.last)
// Allow wormhole routing from sinkC if the first request beat has offset 0
val set_pprobeack = last_probe && (io.sinkc.bits.last || request.offset === 0.U)
w_pprobeack := w_pprobeack || set_pprobeack
params.ccover(!set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_SERIAL", "Sequential routing of probe response data")
params.ccover( set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_WORMHOLE", "Wormhole routing of probe response data")
// However, meta-data updates need to be done more cautiously
when (meta.state =/= INVALID && io.sinkc.bits.tag === meta.tag && io.sinkc.bits.data) { meta.dirty := true.B } // !!!
}
when (io.sinkd.valid) {
when (io.sinkd.bits.opcode === Grant || io.sinkd.bits.opcode === GrantData) {
sink := io.sinkd.bits.sink
w_grantfirst := true.B
w_grantlast := io.sinkd.bits.last
// Record if we need to prevent taking ownership
bad_grant := io.sinkd.bits.denied
// Allow wormhole routing for requests whose first beat has offset 0
w_grant := request.offset === 0.U || io.sinkd.bits.last
params.ccover(io.sinkd.bits.opcode === GrantData && request.offset === 0.U, "MSHR_GRANT_WORMHOLE", "Wormhole routing of grant response data")
params.ccover(io.sinkd.bits.opcode === GrantData && request.offset =/= 0.U, "MSHR_GRANT_SERIAL", "Sequential routing of grant response data")
gotT := io.sinkd.bits.param === toT
}
.elsewhen (io.sinkd.bits.opcode === ReleaseAck) {
w_releaseack := true.B
}
}
when (io.sinke.valid) {
w_grantack := true.B
}
// Bootstrap new requests
val allocate_as_full = WireInit(new FullRequest(params), init = io.allocate.bits)
val new_meta = Mux(io.allocate.valid && io.allocate.bits.repeat, final_meta_writeback, io.directory.bits)
val new_request = Mux(io.allocate.valid, allocate_as_full, request)
val new_needT = needT(new_request.opcode, new_request.param)
val new_clientBit = params.clientBit(new_request.source)
val new_skipProbe = Mux(skipProbeN(new_request.opcode, params.cache.hintsSkipProbe), new_clientBit, 0.U)
val prior = cacheState(final_meta_writeback, true.B)
def bypass(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo): Unit = {
if (cover) {
params.ccover(prior === from.code, s"MSHR_${from}_BYPASS", s"State bypass transition from ${from} ${cfg}")
} else {
assert(!(prior === from.code), cf"State bypass from ${from} should be impossible ${cfg}")
}
}
when (io.allocate.valid && io.allocate.bits.repeat) {
bypass(S_INVALID, f || p) // Can lose permissions (probe/flush)
bypass(S_BRANCH, b) // MMIO read to read-only device
bypass(S_BRANCH_C, b && c) // you need children to become C
bypass(S_TIP, true) // MMIO read || clean release can lead to this state
bypass(S_TIP_C, c) // needs two clients || client + mmio || downgrading client
bypass(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client
bypass(S_TIP_D, true) // MMIO write || dirty release lead here
bypass(S_TRUNK_C, c) // acquire for write
bypass(S_TRUNK_CD, c) // dirty release then reacquire
}
when (io.allocate.valid) {
assert (!request_valid || (no_wait && io.schedule.fire))
request_valid := true.B
request := io.allocate.bits
}
// Create execution plan
when (io.directory.valid || (io.allocate.valid && io.allocate.bits.repeat)) {
meta_valid := true.B
meta := new_meta
probes_done := 0.U
probes_toN := 0.U
probes_noT := false.B
gotT := false.B
bad_grant := false.B
// These should already be either true or turning true
// We force them all back to their idle (true) values here explicitly to simplify the mux tree
s_rprobe := true.B
w_rprobeackfirst := true.B
w_rprobeacklast := true.B
s_release := true.B
w_releaseack := true.B
s_pprobe := true.B
s_acquire := true.B
s_flush := true.B
w_grantfirst := true.B
w_grantlast := true.B
w_grant := true.B
w_pprobeackfirst := true.B
w_pprobeacklast := true.B
w_pprobeack := true.B
s_probeack := true.B
s_grantack := true.B
s_execute := true.B
w_grantack := true.B
s_writeback := true.B
// For C channel requests (ie: Release[Data])
when (new_request.prio(2) && (!params.firstLevel).B) {
s_execute := false.B
// Do we need to go dirty?
when (new_request.opcode(0) && !new_meta.dirty) {
s_writeback := false.B
}
// Does our state change?
when (isToB(new_request.param) && new_meta.state === TRUNK) {
s_writeback := false.B
}
// Do our clients change?
when (isToN(new_request.param) && (new_meta.clients & new_clientBit) =/= 0.U) {
s_writeback := false.B
}
assert (new_meta.hit)
}
// For X channel requests (ie: flush)
.elsewhen (new_request.control && params.control.B) { // new_request.prio(0)
s_flush := false.B
// Do we need to actually do something?
when (new_meta.hit) {
s_release := false.B
w_releaseack := false.B
// Do we need to shoot-down inner caches?
when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) {
s_rprobe := false.B
w_rprobeackfirst := false.B
w_rprobeacklast := false.B
}
}
}
// For A channel requests
.otherwise { // new_request.prio(0) && !new_request.control
s_execute := false.B
// Do we need an eviction?
when (!new_meta.hit && new_meta.state =/= INVALID) {
s_release := false.B
w_releaseack := false.B
// Do we need to shoot-down inner caches?
when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) {
s_rprobe := false.B
w_rprobeackfirst := false.B
w_rprobeacklast := false.B
}
}
// Do we need an acquire?
when (!new_meta.hit || (new_meta.state === BRANCH && new_needT)) {
s_acquire := false.B
w_grantfirst := false.B
w_grantlast := false.B
w_grant := false.B
s_grantack := false.B
s_writeback := false.B
}
// Do we need a probe?
when ((!params.firstLevel).B && (new_meta.hit &&
(new_needT || new_meta.state === TRUNK) &&
(new_meta.clients & ~new_skipProbe) =/= 0.U)) {
s_pprobe := false.B
w_pprobeackfirst := false.B
w_pprobeacklast := false.B
w_pprobeack := false.B
s_writeback := false.B
}
// Do we need a grantack?
when (new_request.opcode === AcquireBlock || new_request.opcode === AcquirePerm) {
w_grantack := false.B
s_writeback := false.B
}
// Becomes dirty?
when (!new_request.opcode(2) && new_meta.hit && !new_meta.dirty) {
s_writeback := false.B
}
}
}
}
File Parameters.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property.cover
import scala.math.{min,max}
case class CacheParameters(
level: Int,
ways: Int,
sets: Int,
blockBytes: Int,
beatBytes: Int, // inner
hintsSkipProbe: Boolean)
{
require (ways > 0)
require (sets > 0)
require (blockBytes > 0 && isPow2(blockBytes))
require (beatBytes > 0 && isPow2(beatBytes))
require (blockBytes >= beatBytes)
val blocks = ways * sets
val sizeBytes = blocks * blockBytes
val blockBeats = blockBytes/beatBytes
}
case class InclusiveCachePortParameters(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e))
}
object InclusiveCachePortParameters
{
val none = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.none)
val full = InclusiveCachePortParameters(
a = BufferParams.default,
b = BufferParams.default,
c = BufferParams.default,
d = BufferParams.default,
e = BufferParams.default)
// This removes feed-through paths from C=>A and A=>C
val fullC = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.default,
d = BufferParams.none,
e = BufferParams.none)
val flowAD = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.flow,
e = BufferParams.none)
val flowAE = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.flow)
// For innerBuf:
// SinkA: no restrictions, flows into scheduler+putbuffer
// SourceB: no restrictions, flows out of scheduler
// sinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore
// SourceD: no restrictions, flows out of bankedStore/regout
// SinkE: no restrictions, flows into scheduler
//
// ... so while none is possible, you probably want at least flowAC to cut ready
// from the scheduler delay and flowD to ease SourceD back-pressure
// For outerBuf:
// SourceA: must not be pipe, flows out of scheduler
// SinkB: no restrictions, flows into scheduler
// SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored
// SinkD: no restrictions, flows into scheduler & bankedStore
// SourceE: must not be pipe, flows out of scheduler
//
// ... AE take the channel ready into the scheduler, so you need at least flowAE
}
case class InclusiveCacheMicroParameters(
writeBytes: Int, // backing store update granularity
memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz)
portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes
dirReg: Boolean = false,
innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none
outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE
{
require (writeBytes > 0 && isPow2(writeBytes))
require (memCycles > 0)
require (portFactor >= 2) // for inner RMW and concurrent outer Release + Grant
}
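// A hypothetical instantiation that keeps the default inner buffering but uses the lighter
// outer buffering suggested in the guidance above (writeBytes = 8 is chosen only for
// illustration):
// InclusiveCacheMicroParameters(writeBytes = 8, outerBuf = InclusiveCachePortParameters.flowAE)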
case class InclusiveCacheControlParameters(
address: BigInt,
beatBytes: Int,
bankedControl: Boolean)
case class InclusiveCacheParameters(
cache: CacheParameters,
micro: InclusiveCacheMicroParameters,
control: Boolean,
inner: TLEdgeIn,
outer: TLEdgeOut)(implicit val p: Parameters)
{
require (cache.ways > 1)
require (cache.sets > 1 && isPow2(cache.sets))
require (micro.writeBytes <= inner.manager.beatBytes)
require (micro.writeBytes <= outer.manager.beatBytes)
require (inner.manager.beatBytes <= cache.blockBytes)
require (outer.manager.beatBytes <= cache.blockBytes)
// Require that all cached address ranges have contiguous blocks
outer.manager.managers.flatMap(_.address).foreach { a =>
require (a.alignment >= cache.blockBytes)
}
// If we are the first level cache, we do not need to support inner-BCE
val firstLevel = !inner.client.clients.exists(_.supports.probe)
// If we are the last level cache, we do not need to support outer-B
val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED)
require (lastLevel)
// Provision enough resources to achieve full throughput with missing single-beat accesses
val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro)
val secondary = max(mshrs, micro.memCycles - mshrs)
val putLists = micro.memCycles // allow every request to be single beat
val putBeats = max(2*cache.blockBeats, micro.memCycles)
val relLists = 2
val relBeats = relLists*cache.blockBeats
val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address))
val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_))
def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] =
if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail)
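// For example, bitOffsets(0xA) walks the mask 0b1010 and returns List(1, 3), the positions of
// the set bits from LSB upward.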
val addressMapping = bitOffsets(pickMask)
val addressBits = addressMapping.size
// println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}")
val allClients = inner.client.clients.size
val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size
val clientBits = max(1, clientBitsRaw)
val stateBits = 2
val wayBits = log2Ceil(cache.ways)
val setBits = log2Ceil(cache.sets)
val offsetBits = log2Ceil(cache.blockBytes)
val tagBits = addressBits - setBits - offsetBits
val putBits = log2Ceil(max(putLists, relLists))
require (tagBits > 0)
require (offsetBits > 0)
val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1
val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1
val innerMaskBits = inner.manager.beatBytes / micro.writeBytes
val outerMaskBits = outer.manager.beatBytes / micro.writeBytes
def clientBit(source: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse)
}
}
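// Hypothetical example: with two probe-capable clients whose sourceId ranges are [0,3] and
// [4,7], clientBit(5.U) yields 0b10; the first client in the filtered list occupies bit 0.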
def clientSource(bit: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U))
}
}
def parseAddress(x: UInt): (UInt, UInt, UInt) = {
val offset = Cat(addressMapping.map(o => x(o,o)).reverse)
val set = offset >> offsetBits
val tag = set >> setBits
(tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0))
}
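// In the MSHR_1 instance emitted below, tagBits = 13, setBits = 10 and offsetBits = 6, so
// parseAddress splits the 29-bit picked address into a 13-bit tag, 10-bit set and 6-bit offset.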
def widen(x: UInt, width: Int): UInt = {
val y = x | 0.U(width.W)
assert (y >> width === 0.U)
y(width-1, 0)
}
def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = {
val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits))
val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) }
addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) }
Cat(bits.reverse)
}
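// expandAddress is the inverse of parseAddress up to the address bits dropped by pickMask;
// restoreAddress below ORs those dropped, region-identifying bits back in.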
def restoreAddress(expanded: UInt): UInt = {
val missingBits = flatAddresses
.map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match
.groupBy(_._1)
.view
.mapValues(_.map(_._2))
val muxMask = AddressDecoder(missingBits.values.toList)
val mux = missingBits.toList.map { case (bits, addrs) =>
val widen = addrs.map(_.widen(~muxMask))
val matches = AddressSet
.unify(widen.distinct)
.map(_.contains(expanded))
.reduce(_ || _)
(matches, bits.U)
}
expanded | Mux1H(mux)
}
def dirReg[T <: Data](x: T, en: Bool = true.B): T = {
if (micro.dirReg) RegEnable(x, en) else x
}
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc)
}
object MetaData
{
val stateBits = 2
def INVALID: UInt = 0.U(stateBits.W) // way is empty
def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk
def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk
def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch
// Does a request need trunk?
def needT(opcode: UInt, param: UInt): Bool = {
!opcode(2) ||
(opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) ||
((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB)
}
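// For example, Get never needs TRUNK, PutFullData and PutPartialData always do (opcode(2) is
// clear), and AcquireBlock needs it only when the requested permission is not NtoB.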
// Does a request prove the client need not be probed?
def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = {
// Acquire(toB) and Get => is N, so no probe
// Acquire(*toT) => is N or B, but need T, so no probe
// Hint => could be anything, so a probe IS needed; if hintsSkipProbe is enabled, skip probing the requesting client
// Put* => is N or B, so probe IS needed
opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm || opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B)
}
def isToN(param: UInt): Bool = {
param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN
}
def isToB(param: UInt): Bool = {
param === TLPermissions.TtoB || param === TLPermissions.BtoB
}
}
object InclusiveCacheParameters
{
val lfsrBits = 10
val L2ControlAddress = 0x2010000
val L2ControlSize = 0x1000
def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = {
// We need 2-3 normal MSHRs to cover the Directory latency
// To fully exploit memory bandwidth-delay-product, we need memCycles/blockBeats MSHRs
max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats)
}
def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int =
// We need a dedicated MSHR for B+C each
2 + out_mshrs(cache, micro)
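// Worked example with hypothetical parameters: blockBytes = 64 and beatBytes = 8 give
// blockBeats = 8, so with memCycles = 40 and dirReg = false, out_mshrs = max(2, (40+7)/8) = 5
// and all_mshrs = 7.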
}
class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
| module MSHR_1( // @[MSHR.scala:84:7]
input clock, // @[MSHR.scala:84:7]
input reset, // @[MSHR.scala:84:7]
input io_allocate_valid, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_0, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_1, // @[MSHR.scala:86:14]
input io_allocate_bits_prio_2, // @[MSHR.scala:86:14]
input io_allocate_bits_control, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_opcode, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_param, // @[MSHR.scala:86:14]
input [2:0] io_allocate_bits_size, // @[MSHR.scala:86:14]
input [6:0] io_allocate_bits_source, // @[MSHR.scala:86:14]
input [12:0] io_allocate_bits_tag, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_offset, // @[MSHR.scala:86:14]
input [5:0] io_allocate_bits_put, // @[MSHR.scala:86:14]
input [9:0] io_allocate_bits_set, // @[MSHR.scala:86:14]
input io_allocate_bits_repeat, // @[MSHR.scala:86:14]
input io_directory_valid, // @[MSHR.scala:86:14]
input io_directory_bits_dirty, // @[MSHR.scala:86:14]
input [1:0] io_directory_bits_state, // @[MSHR.scala:86:14]
input io_directory_bits_clients, // @[MSHR.scala:86:14]
input [12:0] io_directory_bits_tag, // @[MSHR.scala:86:14]
input io_directory_bits_hit, // @[MSHR.scala:86:14]
input [2:0] io_directory_bits_way, // @[MSHR.scala:86:14]
output io_status_valid, // @[MSHR.scala:86:14]
output [9:0] io_status_bits_set, // @[MSHR.scala:86:14]
output [12:0] io_status_bits_tag, // @[MSHR.scala:86:14]
output [2:0] io_status_bits_way, // @[MSHR.scala:86:14]
output io_status_bits_blockB, // @[MSHR.scala:86:14]
output io_status_bits_nestB, // @[MSHR.scala:86:14]
output io_status_bits_blockC, // @[MSHR.scala:86:14]
output io_status_bits_nestC, // @[MSHR.scala:86:14]
input io_schedule_ready, // @[MSHR.scala:86:14]
output io_schedule_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_a_valid, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_a_bits_tag, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_a_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_a_bits_param, // @[MSHR.scala:86:14]
output io_schedule_bits_a_bits_block, // @[MSHR.scala:86:14]
output io_schedule_bits_b_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_b_bits_param, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_b_bits_tag, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_b_bits_set, // @[MSHR.scala:86:14]
output io_schedule_bits_b_bits_clients, // @[MSHR.scala:86:14]
output io_schedule_bits_c_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_opcode, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_param, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_c_bits_tag, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_c_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_c_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_c_bits_dirty, // @[MSHR.scala:86:14]
output io_schedule_bits_d_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_0, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_1, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_prio_2, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_control, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_opcode, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_param, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_size, // @[MSHR.scala:86:14]
output [6:0] io_schedule_bits_d_bits_source, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_d_bits_tag, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_offset, // @[MSHR.scala:86:14]
output [5:0] io_schedule_bits_d_bits_put, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_d_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_d_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_d_bits_bad, // @[MSHR.scala:86:14]
output io_schedule_bits_e_valid, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_e_bits_sink, // @[MSHR.scala:86:14]
output io_schedule_bits_x_valid, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_valid, // @[MSHR.scala:86:14]
output [9:0] io_schedule_bits_dir_bits_set, // @[MSHR.scala:86:14]
output [2:0] io_schedule_bits_dir_bits_way, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_bits_data_dirty, // @[MSHR.scala:86:14]
output [1:0] io_schedule_bits_dir_bits_data_state, // @[MSHR.scala:86:14]
output io_schedule_bits_dir_bits_data_clients, // @[MSHR.scala:86:14]
output [12:0] io_schedule_bits_dir_bits_data_tag, // @[MSHR.scala:86:14]
output io_schedule_bits_reload, // @[MSHR.scala:86:14]
input io_sinkc_valid, // @[MSHR.scala:86:14]
input io_sinkc_bits_last, // @[MSHR.scala:86:14]
input [9:0] io_sinkc_bits_set, // @[MSHR.scala:86:14]
input [12:0] io_sinkc_bits_tag, // @[MSHR.scala:86:14]
input [6:0] io_sinkc_bits_source, // @[MSHR.scala:86:14]
input [2:0] io_sinkc_bits_param, // @[MSHR.scala:86:14]
input io_sinkc_bits_data, // @[MSHR.scala:86:14]
input io_sinkd_valid, // @[MSHR.scala:86:14]
input io_sinkd_bits_last, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_opcode, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_param, // @[MSHR.scala:86:14]
input [3:0] io_sinkd_bits_source, // @[MSHR.scala:86:14]
input [2:0] io_sinkd_bits_sink, // @[MSHR.scala:86:14]
input io_sinkd_bits_denied, // @[MSHR.scala:86:14]
input io_sinke_valid, // @[MSHR.scala:86:14]
input [3:0] io_sinke_bits_sink, // @[MSHR.scala:86:14]
input [9:0] io_nestedwb_set, // @[MSHR.scala:86:14]
input [12:0] io_nestedwb_tag, // @[MSHR.scala:86:14]
input io_nestedwb_b_toN, // @[MSHR.scala:86:14]
input io_nestedwb_b_toB, // @[MSHR.scala:86:14]
input io_nestedwb_b_clr_dirty, // @[MSHR.scala:86:14]
input io_nestedwb_c_set_dirty // @[MSHR.scala:86:14]
);
wire [12:0] final_meta_writeback_tag; // @[MSHR.scala:215:38]
wire final_meta_writeback_clients; // @[MSHR.scala:215:38]
wire [1:0] final_meta_writeback_state; // @[MSHR.scala:215:38]
wire final_meta_writeback_dirty; // @[MSHR.scala:215:38]
wire io_allocate_valid_0 = io_allocate_valid; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_0_0 = io_allocate_bits_prio_0; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_1_0 = io_allocate_bits_prio_1; // @[MSHR.scala:84:7]
wire io_allocate_bits_prio_2_0 = io_allocate_bits_prio_2; // @[MSHR.scala:84:7]
wire io_allocate_bits_control_0 = io_allocate_bits_control; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_opcode_0 = io_allocate_bits_opcode; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_param_0 = io_allocate_bits_param; // @[MSHR.scala:84:7]
wire [2:0] io_allocate_bits_size_0 = io_allocate_bits_size; // @[MSHR.scala:84:7]
wire [6:0] io_allocate_bits_source_0 = io_allocate_bits_source; // @[MSHR.scala:84:7]
wire [12:0] io_allocate_bits_tag_0 = io_allocate_bits_tag; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_offset_0 = io_allocate_bits_offset; // @[MSHR.scala:84:7]
wire [5:0] io_allocate_bits_put_0 = io_allocate_bits_put; // @[MSHR.scala:84:7]
wire [9:0] io_allocate_bits_set_0 = io_allocate_bits_set; // @[MSHR.scala:84:7]
wire io_allocate_bits_repeat_0 = io_allocate_bits_repeat; // @[MSHR.scala:84:7]
wire io_directory_valid_0 = io_directory_valid; // @[MSHR.scala:84:7]
wire io_directory_bits_dirty_0 = io_directory_bits_dirty; // @[MSHR.scala:84:7]
wire [1:0] io_directory_bits_state_0 = io_directory_bits_state; // @[MSHR.scala:84:7]
wire io_directory_bits_clients_0 = io_directory_bits_clients; // @[MSHR.scala:84:7]
wire [12:0] io_directory_bits_tag_0 = io_directory_bits_tag; // @[MSHR.scala:84:7]
wire io_directory_bits_hit_0 = io_directory_bits_hit; // @[MSHR.scala:84:7]
wire [2:0] io_directory_bits_way_0 = io_directory_bits_way; // @[MSHR.scala:84:7]
wire io_schedule_ready_0 = io_schedule_ready; // @[MSHR.scala:84:7]
wire io_sinkc_valid_0 = io_sinkc_valid; // @[MSHR.scala:84:7]
wire io_sinkc_bits_last_0 = io_sinkc_bits_last; // @[MSHR.scala:84:7]
wire [9:0] io_sinkc_bits_set_0 = io_sinkc_bits_set; // @[MSHR.scala:84:7]
wire [12:0] io_sinkc_bits_tag_0 = io_sinkc_bits_tag; // @[MSHR.scala:84:7]
wire [6:0] io_sinkc_bits_source_0 = io_sinkc_bits_source; // @[MSHR.scala:84:7]
wire [2:0] io_sinkc_bits_param_0 = io_sinkc_bits_param; // @[MSHR.scala:84:7]
wire io_sinkc_bits_data_0 = io_sinkc_bits_data; // @[MSHR.scala:84:7]
wire io_sinkd_valid_0 = io_sinkd_valid; // @[MSHR.scala:84:7]
wire io_sinkd_bits_last_0 = io_sinkd_bits_last; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_opcode_0 = io_sinkd_bits_opcode; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_param_0 = io_sinkd_bits_param; // @[MSHR.scala:84:7]
wire [3:0] io_sinkd_bits_source_0 = io_sinkd_bits_source; // @[MSHR.scala:84:7]
wire [2:0] io_sinkd_bits_sink_0 = io_sinkd_bits_sink; // @[MSHR.scala:84:7]
wire io_sinkd_bits_denied_0 = io_sinkd_bits_denied; // @[MSHR.scala:84:7]
wire io_sinke_valid_0 = io_sinke_valid; // @[MSHR.scala:84:7]
wire [3:0] io_sinke_bits_sink_0 = io_sinke_bits_sink; // @[MSHR.scala:84:7]
wire [9:0] io_nestedwb_set_0 = io_nestedwb_set; // @[MSHR.scala:84:7]
wire [12:0] io_nestedwb_tag_0 = io_nestedwb_tag; // @[MSHR.scala:84:7]
wire io_nestedwb_b_toN_0 = io_nestedwb_b_toN; // @[MSHR.scala:84:7]
wire io_nestedwb_b_toB_0 = io_nestedwb_b_toB; // @[MSHR.scala:84:7]
wire io_nestedwb_b_clr_dirty_0 = io_nestedwb_b_clr_dirty; // @[MSHR.scala:84:7]
wire io_nestedwb_c_set_dirty_0 = io_nestedwb_c_set_dirty; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_a_bits_source = 4'h0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_c_bits_source = 4'h0; // @[MSHR.scala:84:7]
wire [3:0] io_schedule_bits_d_bits_sink = 4'h0; // @[MSHR.scala:84:7]
wire io_schedule_bits_x_bits_fail = 1'h0; // @[MSHR.scala:84:7]
wire _io_schedule_bits_c_valid_T_2 = 1'h0; // @[MSHR.scala:186:68]
wire _io_schedule_bits_c_valid_T_3 = 1'h0; // @[MSHR.scala:186:80]
wire invalid_dirty = 1'h0; // @[MSHR.scala:268:21]
wire invalid_clients = 1'h0; // @[MSHR.scala:268:21]
wire _excluded_client_T_7 = 1'h0; // @[Parameters.scala:279:137]
wire _after_T_4 = 1'h0; // @[MSHR.scala:323:11]
wire _new_skipProbe_T_6 = 1'h0; // @[Parameters.scala:279:137]
wire _prior_T_4 = 1'h0; // @[MSHR.scala:323:11]
wire [12:0] invalid_tag = 13'h0; // @[MSHR.scala:268:21]
wire [1:0] invalid_state = 2'h0; // @[MSHR.scala:268:21]
wire [1:0] _final_meta_writeback_state_T_11 = 2'h1; // @[MSHR.scala:240:70]
wire allocate_as_full_prio_0 = io_allocate_bits_prio_0_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_prio_1 = io_allocate_bits_prio_1_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_prio_2 = io_allocate_bits_prio_2_0; // @[MSHR.scala:84:7, :504:34]
wire allocate_as_full_control = io_allocate_bits_control_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_opcode = io_allocate_bits_opcode_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_param = io_allocate_bits_param_0; // @[MSHR.scala:84:7, :504:34]
wire [2:0] allocate_as_full_size = io_allocate_bits_size_0; // @[MSHR.scala:84:7, :504:34]
wire [6:0] allocate_as_full_source = io_allocate_bits_source_0; // @[MSHR.scala:84:7, :504:34]
wire [12:0] allocate_as_full_tag = io_allocate_bits_tag_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_offset = io_allocate_bits_offset_0; // @[MSHR.scala:84:7, :504:34]
wire [5:0] allocate_as_full_put = io_allocate_bits_put_0; // @[MSHR.scala:84:7, :504:34]
wire [9:0] allocate_as_full_set = io_allocate_bits_set_0; // @[MSHR.scala:84:7, :504:34]
wire _io_status_bits_blockB_T_8; // @[MSHR.scala:168:40]
wire _io_status_bits_nestB_T_4; // @[MSHR.scala:169:93]
wire _io_status_bits_blockC_T; // @[MSHR.scala:172:28]
wire _io_status_bits_nestC_T_5; // @[MSHR.scala:173:39]
wire _io_schedule_valid_T_5; // @[MSHR.scala:193:105]
wire _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:184:55]
wire _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:283:91]
wire _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:185:41]
wire [2:0] _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:286:41]
wire [12:0] _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:287:41]
wire _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:289:51]
wire _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:186:64]
wire [2:0] _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:290:41]
wire [2:0] _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:291:41]
wire _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:187:57]
wire [2:0] _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:298:41]
wire _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:188:43]
wire _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:189:40]
wire _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:190:66]
wire _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:310:41]
wire [1:0] _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:310:41]
wire _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:310:41]
wire [12:0] _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:310:41]
wire no_wait; // @[MSHR.scala:183:83]
wire [9:0] io_status_bits_set_0; // @[MSHR.scala:84:7]
wire [12:0] io_status_bits_tag_0; // @[MSHR.scala:84:7]
wire [2:0] io_status_bits_way_0; // @[MSHR.scala:84:7]
wire io_status_bits_blockB_0; // @[MSHR.scala:84:7]
wire io_status_bits_nestB_0; // @[MSHR.scala:84:7]
wire io_status_bits_blockC_0; // @[MSHR.scala:84:7]
wire io_status_bits_nestC_0; // @[MSHR.scala:84:7]
wire io_status_valid_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_a_bits_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_a_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_a_bits_param_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_a_bits_block_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_a_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_b_bits_param_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_b_bits_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_b_bits_set_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_b_bits_clients_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_opcode_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_param_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_c_bits_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_c_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_c_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_c_bits_dirty_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_0_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_1_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_prio_2_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_control_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_opcode_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_param_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_size_0; // @[MSHR.scala:84:7]
wire [6:0] io_schedule_bits_d_bits_source_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_d_bits_tag_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_offset_0; // @[MSHR.scala:84:7]
wire [5:0] io_schedule_bits_d_bits_put_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_d_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_d_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_bits_bad_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_e_bits_sink_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_bits_data_dirty_0; // @[MSHR.scala:84:7]
wire [1:0] io_schedule_bits_dir_bits_data_state_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_bits_data_clients_0; // @[MSHR.scala:84:7]
wire [12:0] io_schedule_bits_dir_bits_data_tag_0; // @[MSHR.scala:84:7]
wire [9:0] io_schedule_bits_dir_bits_set_0; // @[MSHR.scala:84:7]
wire [2:0] io_schedule_bits_dir_bits_way_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7]
wire io_schedule_bits_reload_0; // @[MSHR.scala:84:7]
wire io_schedule_valid_0; // @[MSHR.scala:84:7]
reg request_valid; // @[MSHR.scala:97:30]
assign io_status_valid_0 = request_valid; // @[MSHR.scala:84:7, :97:30]
reg request_prio_0; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_0_0 = request_prio_0; // @[MSHR.scala:84:7, :98:20]
reg request_prio_1; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_1_0 = request_prio_1; // @[MSHR.scala:84:7, :98:20]
reg request_prio_2; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_prio_2_0 = request_prio_2; // @[MSHR.scala:84:7, :98:20]
reg request_control; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_control_0 = request_control; // @[MSHR.scala:84:7, :98:20]
reg [2:0] request_opcode; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_opcode_0 = request_opcode; // @[MSHR.scala:84:7, :98:20]
reg [2:0] request_param; // @[MSHR.scala:98:20]
reg [2:0] request_size; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_size_0 = request_size; // @[MSHR.scala:84:7, :98:20]
reg [6:0] request_source; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_source_0 = request_source; // @[MSHR.scala:84:7, :98:20]
reg [12:0] request_tag; // @[MSHR.scala:98:20]
assign io_status_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_a_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_d_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_offset; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_offset_0 = request_offset; // @[MSHR.scala:84:7, :98:20]
reg [5:0] request_put; // @[MSHR.scala:98:20]
assign io_schedule_bits_d_bits_put_0 = request_put; // @[MSHR.scala:84:7, :98:20]
reg [9:0] request_set; // @[MSHR.scala:98:20]
assign io_status_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_a_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_b_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_c_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_d_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
assign io_schedule_bits_dir_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20]
reg meta_valid; // @[MSHR.scala:99:27]
reg meta_dirty; // @[MSHR.scala:100:17]
assign io_schedule_bits_c_bits_dirty_0 = meta_dirty; // @[MSHR.scala:84:7, :100:17]
reg [1:0] meta_state; // @[MSHR.scala:100:17]
reg meta_clients; // @[MSHR.scala:100:17]
wire _meta_no_clients_T = meta_clients; // @[MSHR.scala:100:17, :220:39]
wire evict_c = meta_clients; // @[MSHR.scala:100:17, :315:27]
wire before_c = meta_clients; // @[MSHR.scala:100:17, :315:27]
reg [12:0] meta_tag; // @[MSHR.scala:100:17]
assign io_schedule_bits_c_bits_tag_0 = meta_tag; // @[MSHR.scala:84:7, :100:17]
reg meta_hit; // @[MSHR.scala:100:17]
reg [2:0] meta_way; // @[MSHR.scala:100:17]
assign io_status_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_c_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_d_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
assign io_schedule_bits_dir_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17]
wire [2:0] final_meta_writeback_way = meta_way; // @[MSHR.scala:100:17, :215:38]
reg s_rprobe; // @[MSHR.scala:121:33]
reg w_rprobeackfirst; // @[MSHR.scala:122:33]
reg w_rprobeacklast; // @[MSHR.scala:123:33]
reg s_release; // @[MSHR.scala:124:33]
reg w_releaseack; // @[MSHR.scala:125:33]
reg s_pprobe; // @[MSHR.scala:126:33]
reg s_acquire; // @[MSHR.scala:127:33]
reg s_flush; // @[MSHR.scala:128:33]
reg w_grantfirst; // @[MSHR.scala:129:33]
reg w_grantlast; // @[MSHR.scala:130:33]
reg w_grant; // @[MSHR.scala:131:33]
reg w_pprobeackfirst; // @[MSHR.scala:132:33]
reg w_pprobeacklast; // @[MSHR.scala:133:33]
reg w_pprobeack; // @[MSHR.scala:134:33]
reg s_grantack; // @[MSHR.scala:136:33]
reg s_execute; // @[MSHR.scala:137:33]
reg w_grantack; // @[MSHR.scala:138:33]
reg s_writeback; // @[MSHR.scala:139:33]
reg [2:0] sink; // @[MSHR.scala:147:17]
assign io_schedule_bits_e_bits_sink_0 = sink; // @[MSHR.scala:84:7, :147:17]
reg gotT; // @[MSHR.scala:148:17]
reg bad_grant; // @[MSHR.scala:149:22]
assign io_schedule_bits_d_bits_bad_0 = bad_grant; // @[MSHR.scala:84:7, :149:22]
reg probes_done; // @[MSHR.scala:150:24]
reg probes_toN; // @[MSHR.scala:151:23]
reg probes_noT; // @[MSHR.scala:152:23]
wire _io_status_bits_blockB_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28]
wire _io_status_bits_blockB_T_1 = ~w_releaseack; // @[MSHR.scala:125:33, :168:45]
wire _io_status_bits_blockB_T_2 = ~w_rprobeacklast; // @[MSHR.scala:123:33, :168:62]
wire _io_status_bits_blockB_T_3 = _io_status_bits_blockB_T_1 | _io_status_bits_blockB_T_2; // @[MSHR.scala:168:{45,59,62}]
wire _io_status_bits_blockB_T_4 = ~w_pprobeacklast; // @[MSHR.scala:133:33, :168:82]
wire _io_status_bits_blockB_T_5 = _io_status_bits_blockB_T_3 | _io_status_bits_blockB_T_4; // @[MSHR.scala:168:{59,79,82}]
wire _io_status_bits_blockB_T_6 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103]
wire _io_status_bits_blockB_T_7 = _io_status_bits_blockB_T_5 & _io_status_bits_blockB_T_6; // @[MSHR.scala:168:{79,100,103}]
assign _io_status_bits_blockB_T_8 = _io_status_bits_blockB_T | _io_status_bits_blockB_T_7; // @[MSHR.scala:168:{28,40,100}]
assign io_status_bits_blockB_0 = _io_status_bits_blockB_T_8; // @[MSHR.scala:84:7, :168:40]
wire _io_status_bits_nestB_T = meta_valid & w_releaseack; // @[MSHR.scala:99:27, :125:33, :169:39]
wire _io_status_bits_nestB_T_1 = _io_status_bits_nestB_T & w_rprobeacklast; // @[MSHR.scala:123:33, :169:{39,55}]
wire _io_status_bits_nestB_T_2 = _io_status_bits_nestB_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :169:{55,74}]
wire _io_status_bits_nestB_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :169:96]
assign _io_status_bits_nestB_T_4 = _io_status_bits_nestB_T_2 & _io_status_bits_nestB_T_3; // @[MSHR.scala:169:{74,93,96}]
assign io_status_bits_nestB_0 = _io_status_bits_nestB_T_4; // @[MSHR.scala:84:7, :169:93]
assign _io_status_bits_blockC_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28, :172:28]
assign io_status_bits_blockC_0 = _io_status_bits_blockC_T; // @[MSHR.scala:84:7, :172:28]
wire _io_status_bits_nestC_T = ~w_rprobeackfirst; // @[MSHR.scala:122:33, :173:43]
wire _io_status_bits_nestC_T_1 = ~w_pprobeackfirst; // @[MSHR.scala:132:33, :173:64]
wire _io_status_bits_nestC_T_2 = _io_status_bits_nestC_T | _io_status_bits_nestC_T_1; // @[MSHR.scala:173:{43,61,64}]
wire _io_status_bits_nestC_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :173:85]
wire _io_status_bits_nestC_T_4 = _io_status_bits_nestC_T_2 | _io_status_bits_nestC_T_3; // @[MSHR.scala:173:{61,82,85}]
assign _io_status_bits_nestC_T_5 = meta_valid & _io_status_bits_nestC_T_4; // @[MSHR.scala:99:27, :173:{39,82}]
assign io_status_bits_nestC_0 = _io_status_bits_nestC_T_5; // @[MSHR.scala:84:7, :173:39]
wire _no_wait_T = w_rprobeacklast & w_releaseack; // @[MSHR.scala:123:33, :125:33, :183:33]
wire _no_wait_T_1 = _no_wait_T & w_grantlast; // @[MSHR.scala:130:33, :183:{33,49}]
wire _no_wait_T_2 = _no_wait_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :183:{49,64}]
assign no_wait = _no_wait_T_2 & w_grantack; // @[MSHR.scala:138:33, :183:{64,83}]
assign io_schedule_bits_reload_0 = no_wait; // @[MSHR.scala:84:7, :183:83]
wire _io_schedule_bits_a_valid_T = ~s_acquire; // @[MSHR.scala:127:33, :184:31]
wire _io_schedule_bits_a_valid_T_1 = _io_schedule_bits_a_valid_T & s_release; // @[MSHR.scala:124:33, :184:{31,42}]
assign _io_schedule_bits_a_valid_T_2 = _io_schedule_bits_a_valid_T_1 & s_pprobe; // @[MSHR.scala:126:33, :184:{42,55}]
assign io_schedule_bits_a_valid_0 = _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:84:7, :184:55]
wire _io_schedule_bits_b_valid_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31]
wire _io_schedule_bits_b_valid_T_1 = ~s_pprobe; // @[MSHR.scala:126:33, :185:44]
assign _io_schedule_bits_b_valid_T_2 = _io_schedule_bits_b_valid_T | _io_schedule_bits_b_valid_T_1; // @[MSHR.scala:185:{31,41,44}]
assign io_schedule_bits_b_valid_0 = _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:84:7, :185:41]
wire _io_schedule_bits_c_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32]
wire _io_schedule_bits_c_valid_T_1 = _io_schedule_bits_c_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :186:{32,43}]
assign _io_schedule_bits_c_valid_T_4 = _io_schedule_bits_c_valid_T_1; // @[MSHR.scala:186:{43,64}]
assign io_schedule_bits_c_valid_0 = _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:84:7, :186:64]
wire _io_schedule_bits_d_valid_T = ~s_execute; // @[MSHR.scala:137:33, :187:31]
wire _io_schedule_bits_d_valid_T_1 = _io_schedule_bits_d_valid_T & w_pprobeack; // @[MSHR.scala:134:33, :187:{31,42}]
assign _io_schedule_bits_d_valid_T_2 = _io_schedule_bits_d_valid_T_1 & w_grant; // @[MSHR.scala:131:33, :187:{42,57}]
assign io_schedule_bits_d_valid_0 = _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:84:7, :187:57]
wire _io_schedule_bits_e_valid_T = ~s_grantack; // @[MSHR.scala:136:33, :188:31]
assign _io_schedule_bits_e_valid_T_1 = _io_schedule_bits_e_valid_T & w_grantfirst; // @[MSHR.scala:129:33, :188:{31,43}]
assign io_schedule_bits_e_valid_0 = _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:84:7, :188:43]
wire _io_schedule_bits_x_valid_T = ~s_flush; // @[MSHR.scala:128:33, :189:31]
assign _io_schedule_bits_x_valid_T_1 = _io_schedule_bits_x_valid_T & w_releaseack; // @[MSHR.scala:125:33, :189:{31,40}]
assign io_schedule_bits_x_valid_0 = _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:84:7, :189:40]
wire _io_schedule_bits_dir_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :190:34]
wire _io_schedule_bits_dir_valid_T_1 = _io_schedule_bits_dir_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :190:{34,45}]
wire _io_schedule_bits_dir_valid_T_2 = ~s_writeback; // @[MSHR.scala:139:33, :190:70]
wire _io_schedule_bits_dir_valid_T_3 = _io_schedule_bits_dir_valid_T_2 & no_wait; // @[MSHR.scala:183:83, :190:{70,83}]
assign _io_schedule_bits_dir_valid_T_4 = _io_schedule_bits_dir_valid_T_1 | _io_schedule_bits_dir_valid_T_3; // @[MSHR.scala:190:{45,66,83}]
assign io_schedule_bits_dir_valid_0 = _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:84:7, :190:66]
wire _io_schedule_valid_T = io_schedule_bits_a_valid_0 | io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7, :192:49]
wire _io_schedule_valid_T_1 = _io_schedule_valid_T | io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7, :192:{49,77}]
wire _io_schedule_valid_T_2 = _io_schedule_valid_T_1 | io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7, :192:{77,105}]
wire _io_schedule_valid_T_3 = _io_schedule_valid_T_2 | io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7, :192:105, :193:49]
wire _io_schedule_valid_T_4 = _io_schedule_valid_T_3 | io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7, :193:{49,77}]
assign _io_schedule_valid_T_5 = _io_schedule_valid_T_4 | io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7, :193:{77,105}]
assign io_schedule_valid_0 = _io_schedule_valid_T_5; // @[MSHR.scala:84:7, :193:105]
wire _io_schedule_bits_dir_bits_data_WIRE_dirty = final_meta_writeback_dirty; // @[MSHR.scala:215:38, :310:71]
wire [1:0] _io_schedule_bits_dir_bits_data_WIRE_state = final_meta_writeback_state; // @[MSHR.scala:215:38, :310:71]
wire _io_schedule_bits_dir_bits_data_WIRE_clients = final_meta_writeback_clients; // @[MSHR.scala:215:38, :310:71]
wire after_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27]
wire prior_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27]
wire [12:0] _io_schedule_bits_dir_bits_data_WIRE_tag = final_meta_writeback_tag; // @[MSHR.scala:215:38, :310:71]
wire final_meta_writeback_hit; // @[MSHR.scala:215:38]
wire req_clientBit = request_source == 7'h20; // @[Parameters.scala:46:9]
wire _req_needT_T = request_opcode[2]; // @[Parameters.scala:269:12]
wire _final_meta_writeback_dirty_T_3 = request_opcode[2]; // @[Parameters.scala:269:12]
wire _req_needT_T_1 = ~_req_needT_T; // @[Parameters.scala:269:{5,12}]
wire _GEN = request_opcode == 3'h5; // @[Parameters.scala:270:13]
wire _req_needT_T_2; // @[Parameters.scala:270:13]
assign _req_needT_T_2 = _GEN; // @[Parameters.scala:270:13]
wire _excluded_client_T_6; // @[Parameters.scala:279:117]
assign _excluded_client_T_6 = _GEN; // @[Parameters.scala:270:13, :279:117]
wire _GEN_0 = request_param == 3'h1; // @[Parameters.scala:270:42]
wire _req_needT_T_3; // @[Parameters.scala:270:42]
assign _req_needT_T_3 = _GEN_0; // @[Parameters.scala:270:42]
wire _final_meta_writeback_clients_T; // @[Parameters.scala:282:11]
assign _final_meta_writeback_clients_T = _GEN_0; // @[Parameters.scala:270:42, :282:11]
wire _io_schedule_bits_d_bits_param_T_7; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_7 = _GEN_0; // @[Parameters.scala:270:42]
wire _req_needT_T_4 = _req_needT_T_2 & _req_needT_T_3; // @[Parameters.scala:270:{13,33,42}]
wire _req_needT_T_5 = _req_needT_T_1 | _req_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33]
wire _GEN_1 = request_opcode == 3'h6; // @[Parameters.scala:271:14]
wire _req_needT_T_6; // @[Parameters.scala:271:14]
assign _req_needT_T_6 = _GEN_1; // @[Parameters.scala:271:14]
wire _req_acquire_T; // @[MSHR.scala:219:36]
assign _req_acquire_T = _GEN_1; // @[Parameters.scala:271:14]
wire _excluded_client_T_1; // @[Parameters.scala:279:12]
assign _excluded_client_T_1 = _GEN_1; // @[Parameters.scala:271:14, :279:12]
wire _req_needT_T_7 = &request_opcode; // @[Parameters.scala:271:52]
wire _req_needT_T_8 = _req_needT_T_6 | _req_needT_T_7; // @[Parameters.scala:271:{14,42,52}]
wire _req_needT_T_9 = |request_param; // @[Parameters.scala:271:89]
wire _req_needT_T_10 = _req_needT_T_8 & _req_needT_T_9; // @[Parameters.scala:271:{42,80,89}]
wire req_needT = _req_needT_T_5 | _req_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80]
wire _req_acquire_T_1 = &request_opcode; // @[Parameters.scala:271:52]
wire req_acquire = _req_acquire_T | _req_acquire_T_1; // @[MSHR.scala:219:{36,53,71}]
wire meta_no_clients = ~_meta_no_clients_T; // @[MSHR.scala:220:{25,39}]
wire _req_promoteT_T = &meta_state; // @[MSHR.scala:100:17, :221:81]
wire _req_promoteT_T_1 = meta_no_clients & _req_promoteT_T; // @[MSHR.scala:220:25, :221:{67,81}]
wire _req_promoteT_T_2 = meta_hit ? _req_promoteT_T_1 : gotT; // @[MSHR.scala:100:17, :148:17, :221:{40,67}]
wire req_promoteT = req_acquire & _req_promoteT_T_2; // @[MSHR.scala:219:53, :221:{34,40}]
wire _final_meta_writeback_dirty_T = request_opcode[0]; // @[MSHR.scala:98:20, :224:65]
wire _final_meta_writeback_dirty_T_1 = meta_dirty | _final_meta_writeback_dirty_T; // @[MSHR.scala:100:17, :224:{48,65}]
wire _final_meta_writeback_state_T = request_param != 3'h3; // @[MSHR.scala:98:20, :225:55]
wire _GEN_2 = meta_state == 2'h2; // @[MSHR.scala:100:17, :225:78]
wire _final_meta_writeback_state_T_1; // @[MSHR.scala:225:78]
assign _final_meta_writeback_state_T_1 = _GEN_2; // @[MSHR.scala:225:78]
wire _final_meta_writeback_state_T_12; // @[MSHR.scala:240:70]
assign _final_meta_writeback_state_T_12 = _GEN_2; // @[MSHR.scala:225:78, :240:70]
wire _evict_T_2; // @[MSHR.scala:317:26]
assign _evict_T_2 = _GEN_2; // @[MSHR.scala:225:78, :317:26]
wire _before_T_1; // @[MSHR.scala:317:26]
assign _before_T_1 = _GEN_2; // @[MSHR.scala:225:78, :317:26]
wire _final_meta_writeback_state_T_2 = _final_meta_writeback_state_T & _final_meta_writeback_state_T_1; // @[MSHR.scala:225:{55,64,78}]
wire [1:0] _final_meta_writeback_state_T_3 = _final_meta_writeback_state_T_2 ? 2'h3 : meta_state; // @[MSHR.scala:100:17, :225:{40,64}]
wire _GEN_3 = request_param == 3'h2; // @[Parameters.scala:282:43]
wire _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:43]
assign _final_meta_writeback_clients_T_1 = _GEN_3; // @[Parameters.scala:282:43]
wire _io_schedule_bits_d_bits_param_T_5; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_5 = _GEN_3; // @[Parameters.scala:282:43]
wire _final_meta_writeback_clients_T_2 = _final_meta_writeback_clients_T | _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:{11,34,43}]
wire _final_meta_writeback_clients_T_3 = request_param == 3'h5; // @[Parameters.scala:282:75]
wire _final_meta_writeback_clients_T_4 = _final_meta_writeback_clients_T_2 | _final_meta_writeback_clients_T_3; // @[Parameters.scala:282:{34,66,75}]
wire _final_meta_writeback_clients_T_5 = _final_meta_writeback_clients_T_4 & req_clientBit; // @[Parameters.scala:46:9]
wire _final_meta_writeback_clients_T_6 = ~_final_meta_writeback_clients_T_5; // @[MSHR.scala:226:{52,56}]
wire _final_meta_writeback_clients_T_7 = meta_clients & _final_meta_writeback_clients_T_6; // @[MSHR.scala:100:17, :226:{50,52}]
wire _final_meta_writeback_clients_T_8 = ~probes_toN; // @[MSHR.scala:151:23, :232:54]
wire _final_meta_writeback_clients_T_9 = meta_clients & _final_meta_writeback_clients_T_8; // @[MSHR.scala:100:17, :232:{52,54}]
wire _final_meta_writeback_dirty_T_2 = meta_hit & meta_dirty; // @[MSHR.scala:100:17, :236:45]
wire _final_meta_writeback_dirty_T_4 = ~_final_meta_writeback_dirty_T_3; // @[MSHR.scala:236:{63,78}]
wire _final_meta_writeback_dirty_T_5 = _final_meta_writeback_dirty_T_2 | _final_meta_writeback_dirty_T_4; // @[MSHR.scala:236:{45,60,63}]
wire [1:0] _GEN_4 = {1'h1, ~req_acquire}; // @[MSHR.scala:219:53, :238:40]
wire [1:0] _final_meta_writeback_state_T_4; // @[MSHR.scala:238:40]
assign _final_meta_writeback_state_T_4 = _GEN_4; // @[MSHR.scala:238:40]
wire [1:0] _final_meta_writeback_state_T_6; // @[MSHR.scala:239:65]
assign _final_meta_writeback_state_T_6 = _GEN_4; // @[MSHR.scala:238:40, :239:65]
wire _final_meta_writeback_state_T_5 = ~meta_hit; // @[MSHR.scala:100:17, :239:41]
wire [1:0] _final_meta_writeback_state_T_7 = gotT ? _final_meta_writeback_state_T_6 : 2'h1; // @[MSHR.scala:148:17, :239:{55,65}]
wire _final_meta_writeback_state_T_8 = meta_no_clients & req_acquire; // @[MSHR.scala:219:53, :220:25, :244:72]
wire [1:0] _final_meta_writeback_state_T_9 = {1'h1, ~_final_meta_writeback_state_T_8}; // @[MSHR.scala:244:{55,72}]
wire _GEN_5 = meta_state == 2'h1; // @[MSHR.scala:100:17, :240:70]
wire _final_meta_writeback_state_T_10; // @[MSHR.scala:240:70]
assign _final_meta_writeback_state_T_10 = _GEN_5; // @[MSHR.scala:240:70]
wire _io_schedule_bits_c_bits_param_T; // @[MSHR.scala:291:53]
assign _io_schedule_bits_c_bits_param_T = _GEN_5; // @[MSHR.scala:240:70, :291:53]
wire _evict_T_1; // @[MSHR.scala:317:26]
assign _evict_T_1 = _GEN_5; // @[MSHR.scala:240:70, :317:26]
wire _before_T; // @[MSHR.scala:317:26]
assign _before_T = _GEN_5; // @[MSHR.scala:240:70, :317:26]
wire [1:0] _final_meta_writeback_state_T_13 = {_final_meta_writeback_state_T_12, 1'h1}; // @[MSHR.scala:240:70]
wire _final_meta_writeback_state_T_14 = &meta_state; // @[MSHR.scala:100:17, :221:81, :240:70]
wire [1:0] _final_meta_writeback_state_T_15 = _final_meta_writeback_state_T_14 ? _final_meta_writeback_state_T_9 : _final_meta_writeback_state_T_13; // @[MSHR.scala:240:70, :244:55]
wire [1:0] _final_meta_writeback_state_T_16 = _final_meta_writeback_state_T_5 ? _final_meta_writeback_state_T_7 : _final_meta_writeback_state_T_15; // @[MSHR.scala:239:{40,41,55}, :240:70]
wire [1:0] _final_meta_writeback_state_T_17 = req_needT ? _final_meta_writeback_state_T_4 : _final_meta_writeback_state_T_16; // @[Parameters.scala:270:70]
wire _final_meta_writeback_clients_T_10 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :245:66]
wire _final_meta_writeback_clients_T_11 = meta_clients & _final_meta_writeback_clients_T_10; // @[MSHR.scala:100:17, :245:{64,66}]
wire _final_meta_writeback_clients_T_12 = meta_hit & _final_meta_writeback_clients_T_11; // @[MSHR.scala:100:17, :245:{40,64}]
wire _final_meta_writeback_clients_T_13 = req_acquire & req_clientBit; // @[Parameters.scala:46:9]
wire _final_meta_writeback_clients_T_14 = _final_meta_writeback_clients_T_12 | _final_meta_writeback_clients_T_13; // @[MSHR.scala:245:{40,84}, :246:40]
assign final_meta_writeback_tag = request_prio_2 | request_control ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :215:38, :223:52, :228:53, :247:30]
wire _final_meta_writeback_clients_T_15 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :258:54]
wire _final_meta_writeback_clients_T_16 = meta_clients & _final_meta_writeback_clients_T_15; // @[MSHR.scala:100:17, :258:{52,54}]
assign final_meta_writeback_hit = bad_grant ? meta_hit : request_prio_2 | ~request_control; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :227:34, :228:53, :234:30, :248:30, :251:20, :252:21]
assign final_meta_writeback_dirty = ~bad_grant & (request_prio_2 ? _final_meta_writeback_dirty_T_1 : request_control ? ~meta_hit & meta_dirty : _final_meta_writeback_dirty_T_5); // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :224:{34,48}, :228:53, :229:21, :230:36, :236:{32,60}, :251:20, :252:21]
assign final_meta_writeback_state = bad_grant ? {1'h0, meta_hit} : request_prio_2 ? _final_meta_writeback_state_T_3 : request_control ? (meta_hit ? 2'h0 : meta_state) : _final_meta_writeback_state_T_17; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :225:{34,40}, :228:53, :229:21, :231:36, :237:{32,38}, :251:20, :252:21, :257:36, :263:36]
assign final_meta_writeback_clients = bad_grant ? meta_hit & _final_meta_writeback_clients_T_16 : request_prio_2 ? _final_meta_writeback_clients_T_7 : request_control ? (meta_hit ? _final_meta_writeback_clients_T_9 : meta_clients) : _final_meta_writeback_clients_T_14; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :226:{34,50}, :228:53, :229:21, :232:{36,52}, :245:{34,84}, :251:20, :252:21, :258:{36,52}, :264:36]
wire _honour_BtoT_T = meta_clients & req_clientBit; // @[Parameters.scala:46:9]
wire _honour_BtoT_T_1 = _honour_BtoT_T; // @[MSHR.scala:276:{47,64}]
wire honour_BtoT = meta_hit & _honour_BtoT_T_1; // @[MSHR.scala:100:17, :276:{30,64}]
wire _excluded_client_T = meta_hit & request_prio_0; // @[MSHR.scala:98:20, :100:17, :279:38]
wire _excluded_client_T_2 = &request_opcode; // @[Parameters.scala:271:52, :279:50]
wire _excluded_client_T_3 = _excluded_client_T_1 | _excluded_client_T_2; // @[Parameters.scala:279:{12,40,50}]
wire _excluded_client_T_4 = request_opcode == 3'h4; // @[Parameters.scala:279:87]
wire _excluded_client_T_5 = _excluded_client_T_3 | _excluded_client_T_4; // @[Parameters.scala:279:{40,77,87}]
wire _excluded_client_T_8 = _excluded_client_T_5; // @[Parameters.scala:279:{77,106}]
wire _excluded_client_T_9 = _excluded_client_T & _excluded_client_T_8; // @[Parameters.scala:279:106]
wire excluded_client = _excluded_client_T_9 & req_clientBit; // @[Parameters.scala:46:9]
wire [1:0] _io_schedule_bits_a_bits_param_T = meta_hit ? 2'h2 : 2'h1; // @[MSHR.scala:100:17, :282:56]
wire [1:0] _io_schedule_bits_a_bits_param_T_1 = req_needT ? _io_schedule_bits_a_bits_param_T : 2'h0; // @[Parameters.scala:270:70]
assign io_schedule_bits_a_bits_param_0 = {1'h0, _io_schedule_bits_a_bits_param_T_1}; // @[MSHR.scala:84:7, :282:{35,41}]
wire _io_schedule_bits_a_bits_block_T = request_size != 3'h6; // @[MSHR.scala:98:20, :283:51]
wire _io_schedule_bits_a_bits_block_T_1 = request_opcode == 3'h0; // @[MSHR.scala:98:20, :284:55]
wire _io_schedule_bits_a_bits_block_T_2 = &request_opcode; // @[Parameters.scala:271:52]
wire _io_schedule_bits_a_bits_block_T_3 = _io_schedule_bits_a_bits_block_T_1 | _io_schedule_bits_a_bits_block_T_2; // @[MSHR.scala:284:{55,71,89}]
wire _io_schedule_bits_a_bits_block_T_4 = ~_io_schedule_bits_a_bits_block_T_3; // @[MSHR.scala:284:{38,71}]
assign _io_schedule_bits_a_bits_block_T_5 = _io_schedule_bits_a_bits_block_T | _io_schedule_bits_a_bits_block_T_4; // @[MSHR.scala:283:{51,91}, :284:38]
assign io_schedule_bits_a_bits_block_0 = _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:84:7, :283:91]
wire _io_schedule_bits_b_bits_param_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :286:42]
wire [1:0] _io_schedule_bits_b_bits_param_T_1 = req_needT ? 2'h2 : 2'h1; // @[Parameters.scala:270:70]
wire [2:0] _io_schedule_bits_b_bits_param_T_2 = request_prio_1 ? request_param : {1'h0, _io_schedule_bits_b_bits_param_T_1}; // @[MSHR.scala:98:20, :286:{61,97}]
assign _io_schedule_bits_b_bits_param_T_3 = _io_schedule_bits_b_bits_param_T ? 3'h2 : _io_schedule_bits_b_bits_param_T_2; // @[MSHR.scala:286:{41,42,61}]
assign io_schedule_bits_b_bits_param_0 = _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:84:7, :286:41]
wire _io_schedule_bits_b_bits_tag_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :287:42]
assign _io_schedule_bits_b_bits_tag_T_1 = _io_schedule_bits_b_bits_tag_T ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :287:{41,42}]
assign io_schedule_bits_b_bits_tag_0 = _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:84:7, :287:41]
wire _io_schedule_bits_b_bits_clients_T = ~excluded_client; // @[MSHR.scala:279:28, :289:53]
assign _io_schedule_bits_b_bits_clients_T_1 = meta_clients & _io_schedule_bits_b_bits_clients_T; // @[MSHR.scala:100:17, :289:{51,53}]
assign io_schedule_bits_b_bits_clients_0 = _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:84:7, :289:51]
assign _io_schedule_bits_c_bits_opcode_T = {2'h3, meta_dirty}; // @[MSHR.scala:100:17, :290:41]
assign io_schedule_bits_c_bits_opcode_0 = _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:84:7, :290:41]
assign _io_schedule_bits_c_bits_param_T_1 = _io_schedule_bits_c_bits_param_T ? 3'h2 : 3'h1; // @[MSHR.scala:291:{41,53}]
assign io_schedule_bits_c_bits_param_0 = _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:84:7, :291:41]
wire _io_schedule_bits_d_bits_param_T = ~req_acquire; // @[MSHR.scala:219:53, :298:42]
wire [1:0] _io_schedule_bits_d_bits_param_T_1 = {1'h0, req_promoteT}; // @[MSHR.scala:221:34, :300:53]
wire [1:0] _io_schedule_bits_d_bits_param_T_2 = honour_BtoT ? 2'h2 : 2'h1; // @[MSHR.scala:276:30, :301:53]
wire _io_schedule_bits_d_bits_param_T_3 = ~(|request_param); // @[Parameters.scala:271:89]
wire [2:0] _io_schedule_bits_d_bits_param_T_4 = _io_schedule_bits_d_bits_param_T_3 ? {1'h0, _io_schedule_bits_d_bits_param_T_1} : request_param; // @[MSHR.scala:98:20, :299:79, :300:53]
wire [2:0] _io_schedule_bits_d_bits_param_T_6 = _io_schedule_bits_d_bits_param_T_5 ? {1'h0, _io_schedule_bits_d_bits_param_T_2} : _io_schedule_bits_d_bits_param_T_4; // @[MSHR.scala:299:79, :301:53]
wire [2:0] _io_schedule_bits_d_bits_param_T_8 = _io_schedule_bits_d_bits_param_T_7 ? 3'h1 : _io_schedule_bits_d_bits_param_T_6; // @[MSHR.scala:299:79]
assign _io_schedule_bits_d_bits_param_T_9 = _io_schedule_bits_d_bits_param_T ? request_param : _io_schedule_bits_d_bits_param_T_8; // @[MSHR.scala:98:20, :298:{41,42}, :299:79]
assign io_schedule_bits_d_bits_param_0 = _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:84:7, :298:41]
wire _io_schedule_bits_dir_bits_data_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :310:42]
assign _io_schedule_bits_dir_bits_data_T_1_dirty = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_dirty; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_state = _io_schedule_bits_dir_bits_data_T ? 2'h0 : _io_schedule_bits_dir_bits_data_WIRE_state; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_clients = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_clients; // @[MSHR.scala:310:{41,42,71}]
assign _io_schedule_bits_dir_bits_data_T_1_tag = _io_schedule_bits_dir_bits_data_T ? 13'h0 : _io_schedule_bits_dir_bits_data_WIRE_tag; // @[MSHR.scala:310:{41,42,71}]
assign io_schedule_bits_dir_bits_data_dirty_0 = _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_state_0 = _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_clients_0 = _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:84:7, :310:41]
assign io_schedule_bits_dir_bits_data_tag_0 = _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:84:7, :310:41]
wire _evict_T = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :338:32]
wire [3:0] evict; // @[MSHR.scala:314:26]
wire _evict_out_T = ~evict_c; // @[MSHR.scala:315:27, :318:32]
wire [1:0] _GEN_6 = {1'h1, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32]
wire [1:0] _evict_out_T_1; // @[MSHR.scala:319:32]
assign _evict_out_T_1 = _GEN_6; // @[MSHR.scala:319:32]
wire [1:0] _before_out_T_1; // @[MSHR.scala:319:32]
assign _before_out_T_1 = _GEN_6; // @[MSHR.scala:319:32]
wire _evict_T_3 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26]
wire [2:0] _GEN_7 = {2'h2, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:39]
wire [2:0] _evict_out_T_2; // @[MSHR.scala:320:39]
assign _evict_out_T_2 = _GEN_7; // @[MSHR.scala:320:39]
wire [2:0] _before_out_T_2; // @[MSHR.scala:320:39]
assign _before_out_T_2 = _GEN_7; // @[MSHR.scala:320:39]
wire [2:0] _GEN_8 = {2'h3, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:76]
wire [2:0] _evict_out_T_3; // @[MSHR.scala:320:76]
assign _evict_out_T_3 = _GEN_8; // @[MSHR.scala:320:76]
wire [2:0] _before_out_T_3; // @[MSHR.scala:320:76]
assign _before_out_T_3 = _GEN_8; // @[MSHR.scala:320:76]
wire [2:0] _evict_out_T_4 = evict_c ? _evict_out_T_2 : _evict_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _evict_T_4 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26]
wire _evict_T_5 = ~_evict_T; // @[MSHR.scala:323:11, :338:32]
assign evict = _evict_T_5 ? 4'h8 : _evict_T_1 ? {3'h0, _evict_out_T} : _evict_T_2 ? {2'h0, _evict_out_T_1} : _evict_T_3 ? {1'h0, _evict_out_T_4} : {_evict_T_4, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}]
wire [3:0] before_0; // @[MSHR.scala:314:26]
wire _before_out_T = ~before_c; // @[MSHR.scala:315:27, :318:32]
wire _before_T_2 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26]
wire [2:0] _before_out_T_4 = before_c ? _before_out_T_2 : _before_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _before_T_3 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26]
wire _before_T_4 = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :323:11]
assign before_0 = _before_T_4 ? 4'h8 : _before_T ? {3'h0, _before_out_T} : _before_T_1 ? {2'h0, _before_out_T_1} : _before_T_2 ? {1'h0, _before_out_T_4} : {_before_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}]
wire [3:0] after; // @[MSHR.scala:314:26]
wire _GEN_9 = final_meta_writeback_state == 2'h1; // @[MSHR.scala:215:38, :317:26]
wire _after_T; // @[MSHR.scala:317:26]
assign _after_T = _GEN_9; // @[MSHR.scala:317:26]
wire _prior_T; // @[MSHR.scala:317:26]
assign _prior_T = _GEN_9; // @[MSHR.scala:317:26]
wire _after_out_T = ~after_c; // @[MSHR.scala:315:27, :318:32]
wire _GEN_10 = final_meta_writeback_state == 2'h2; // @[MSHR.scala:215:38, :317:26]
wire _after_T_1; // @[MSHR.scala:317:26]
assign _after_T_1 = _GEN_10; // @[MSHR.scala:317:26]
wire _prior_T_1; // @[MSHR.scala:317:26]
assign _prior_T_1 = _GEN_10; // @[MSHR.scala:317:26]
wire [1:0] _GEN_11 = {1'h1, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32]
wire [1:0] _after_out_T_1; // @[MSHR.scala:319:32]
assign _after_out_T_1 = _GEN_11; // @[MSHR.scala:319:32]
wire [1:0] _prior_out_T_1; // @[MSHR.scala:319:32]
assign _prior_out_T_1 = _GEN_11; // @[MSHR.scala:319:32]
wire _after_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26]
wire [2:0] _GEN_12 = {2'h2, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:39]
wire [2:0] _after_out_T_2; // @[MSHR.scala:320:39]
assign _after_out_T_2 = _GEN_12; // @[MSHR.scala:320:39]
wire [2:0] _prior_out_T_2; // @[MSHR.scala:320:39]
assign _prior_out_T_2 = _GEN_12; // @[MSHR.scala:320:39]
wire [2:0] _GEN_13 = {2'h3, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:76]
wire [2:0] _after_out_T_3; // @[MSHR.scala:320:76]
assign _after_out_T_3 = _GEN_13; // @[MSHR.scala:320:76]
wire [2:0] _prior_out_T_3; // @[MSHR.scala:320:76]
assign _prior_out_T_3 = _GEN_13; // @[MSHR.scala:320:76]
wire [2:0] _after_out_T_4 = after_c ? _after_out_T_2 : _after_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
wire _GEN_14 = final_meta_writeback_state == 2'h0; // @[MSHR.scala:215:38, :317:26]
wire _after_T_3; // @[MSHR.scala:317:26]
assign _after_T_3 = _GEN_14; // @[MSHR.scala:317:26]
wire _prior_T_3; // @[MSHR.scala:317:26]
assign _prior_T_3 = _GEN_14; // @[MSHR.scala:317:26]
assign after = _after_T ? {3'h0, _after_out_T} : _after_T_1 ? {2'h0, _after_out_T_1} : _after_T_2 ? {1'h0, _after_out_T_4} : {_after_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26]
wire probe_bit = io_sinkc_bits_source_0 == 7'h20; // @[Parameters.scala:46:9]
wire _GEN_15 = probes_done | probe_bit; // @[Parameters.scala:46:9]
wire _last_probe_T; // @[MSHR.scala:459:33]
assign _last_probe_T = _GEN_15; // @[MSHR.scala:459:33]
wire _probes_done_T; // @[MSHR.scala:467:32]
assign _probes_done_T = _GEN_15; // @[MSHR.scala:459:33, :467:32]
wire _last_probe_T_1 = ~excluded_client; // @[MSHR.scala:279:28, :289:53, :459:66]
wire _last_probe_T_2 = meta_clients & _last_probe_T_1; // @[MSHR.scala:100:17, :459:{64,66}]
wire last_probe = _last_probe_T == _last_probe_T_2; // @[MSHR.scala:459:{33,46,64}]
wire _probe_toN_T = io_sinkc_bits_param_0 == 3'h1; // @[Parameters.scala:282:11]
wire _probe_toN_T_1 = io_sinkc_bits_param_0 == 3'h2; // @[Parameters.scala:282:43]
wire _probe_toN_T_2 = _probe_toN_T | _probe_toN_T_1; // @[Parameters.scala:282:{11,34,43}]
wire _probe_toN_T_3 = io_sinkc_bits_param_0 == 3'h5; // @[Parameters.scala:282:75]
wire probe_toN = _probe_toN_T_2 | _probe_toN_T_3; // @[Parameters.scala:282:{34,66,75}]
wire _probes_toN_T = probe_toN & probe_bit; // @[Parameters.scala:46:9]
wire _probes_toN_T_1 = probes_toN | _probes_toN_T; // @[MSHR.scala:151:23, :468:{30,35}]
wire _probes_noT_T = io_sinkc_bits_param_0 != 3'h3; // @[MSHR.scala:84:7, :469:53]
wire _probes_noT_T_1 = probes_noT | _probes_noT_T; // @[MSHR.scala:152:23, :469:{30,53}]
wire _w_rprobeackfirst_T = w_rprobeackfirst | last_probe; // @[MSHR.scala:122:33, :459:46, :470:42]
wire _GEN_16 = last_probe & io_sinkc_bits_last_0; // @[MSHR.scala:84:7, :459:46, :471:55]
wire _w_rprobeacklast_T; // @[MSHR.scala:471:55]
assign _w_rprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55]
wire _w_pprobeacklast_T; // @[MSHR.scala:473:55]
assign _w_pprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55, :473:55]
wire _w_rprobeacklast_T_1 = w_rprobeacklast | _w_rprobeacklast_T; // @[MSHR.scala:123:33, :471:{40,55}]
wire _w_pprobeackfirst_T = w_pprobeackfirst | last_probe; // @[MSHR.scala:132:33, :459:46, :472:42]
wire _w_pprobeacklast_T_1 = w_pprobeacklast | _w_pprobeacklast_T; // @[MSHR.scala:133:33, :473:{40,55}]
wire _set_pprobeack_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77]
wire _set_pprobeack_T_1 = io_sinkc_bits_last_0 | _set_pprobeack_T; // @[MSHR.scala:84:7, :475:{59,77}]
wire set_pprobeack = last_probe & _set_pprobeack_T_1; // @[MSHR.scala:459:46, :475:{36,59}]
wire _w_pprobeack_T = w_pprobeack | set_pprobeack; // @[MSHR.scala:134:33, :475:36, :476:32]
wire _w_grant_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77, :490:33]
wire _w_grant_T_1 = _w_grant_T | io_sinkd_bits_last_0; // @[MSHR.scala:84:7, :490:{33,41}]
wire _gotT_T = io_sinkd_bits_param_0 == 3'h0; // @[MSHR.scala:84:7, :493:35]
wire _new_meta_T = io_allocate_valid_0 & io_allocate_bits_repeat_0; // @[MSHR.scala:84:7, :505:40]
wire new_meta_dirty = _new_meta_T ? final_meta_writeback_dirty : io_directory_bits_dirty_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [1:0] new_meta_state = _new_meta_T ? final_meta_writeback_state : io_directory_bits_state_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_meta_clients = _new_meta_T ? final_meta_writeback_clients : io_directory_bits_clients_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [12:0] new_meta_tag = _new_meta_T ? final_meta_writeback_tag : io_directory_bits_tag_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_meta_hit = _new_meta_T ? final_meta_writeback_hit : io_directory_bits_hit_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire [2:0] new_meta_way = _new_meta_T ? final_meta_writeback_way : io_directory_bits_way_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}]
wire new_request_prio_0 = io_allocate_valid_0 ? allocate_as_full_prio_0 : request_prio_0; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_prio_1 = io_allocate_valid_0 ? allocate_as_full_prio_1 : request_prio_1; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_prio_2 = io_allocate_valid_0 ? allocate_as_full_prio_2 : request_prio_2; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire new_request_control = io_allocate_valid_0 ? allocate_as_full_control : request_control; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_opcode = io_allocate_valid_0 ? allocate_as_full_opcode : request_opcode; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_param = io_allocate_valid_0 ? allocate_as_full_param : request_param; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [2:0] new_request_size = io_allocate_valid_0 ? allocate_as_full_size : request_size; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [6:0] new_request_source = io_allocate_valid_0 ? allocate_as_full_source : request_source; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [12:0] new_request_tag = io_allocate_valid_0 ? allocate_as_full_tag : request_tag; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_offset = io_allocate_valid_0 ? allocate_as_full_offset : request_offset; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [5:0] new_request_put = io_allocate_valid_0 ? allocate_as_full_put : request_put; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire [9:0] new_request_set = io_allocate_valid_0 ? allocate_as_full_set : request_set; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24]
wire _new_needT_T = new_request_opcode[2]; // @[Parameters.scala:269:12]
wire _new_needT_T_1 = ~_new_needT_T; // @[Parameters.scala:269:{5,12}]
wire _GEN_17 = new_request_opcode == 3'h5; // @[Parameters.scala:270:13]
wire _new_needT_T_2; // @[Parameters.scala:270:13]
assign _new_needT_T_2 = _GEN_17; // @[Parameters.scala:270:13]
wire _new_skipProbe_T_5; // @[Parameters.scala:279:117]
assign _new_skipProbe_T_5 = _GEN_17; // @[Parameters.scala:270:13, :279:117]
wire _new_needT_T_3 = new_request_param == 3'h1; // @[Parameters.scala:270:42]
wire _new_needT_T_4 = _new_needT_T_2 & _new_needT_T_3; // @[Parameters.scala:270:{13,33,42}]
wire _new_needT_T_5 = _new_needT_T_1 | _new_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33]
wire _T_615 = new_request_opcode == 3'h6; // @[Parameters.scala:271:14]
wire _new_needT_T_6; // @[Parameters.scala:271:14]
assign _new_needT_T_6 = _T_615; // @[Parameters.scala:271:14]
wire _new_skipProbe_T; // @[Parameters.scala:279:12]
assign _new_skipProbe_T = _T_615; // @[Parameters.scala:271:14, :279:12]
wire _new_needT_T_7 = &new_request_opcode; // @[Parameters.scala:271:52]
wire _new_needT_T_8 = _new_needT_T_6 | _new_needT_T_7; // @[Parameters.scala:271:{14,42,52}]
wire _new_needT_T_9 = |new_request_param; // @[Parameters.scala:271:89]
wire _new_needT_T_10 = _new_needT_T_8 & _new_needT_T_9; // @[Parameters.scala:271:{42,80,89}]
wire new_needT = _new_needT_T_5 | _new_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80]
wire new_clientBit = new_request_source == 7'h20; // @[Parameters.scala:46:9]
wire _new_skipProbe_T_1 = &new_request_opcode; // @[Parameters.scala:271:52, :279:50]
wire _new_skipProbe_T_2 = _new_skipProbe_T | _new_skipProbe_T_1; // @[Parameters.scala:279:{12,40,50}]
wire _new_skipProbe_T_3 = new_request_opcode == 3'h4; // @[Parameters.scala:279:87]
wire _new_skipProbe_T_4 = _new_skipProbe_T_2 | _new_skipProbe_T_3; // @[Parameters.scala:279:{40,77,87}]
wire _new_skipProbe_T_7 = _new_skipProbe_T_4; // @[Parameters.scala:279:{77,106}]
wire new_skipProbe = _new_skipProbe_T_7 & new_clientBit; // @[Parameters.scala:46:9]
wire [3:0] prior; // @[MSHR.scala:314:26]
wire _prior_out_T = ~prior_c; // @[MSHR.scala:315:27, :318:32]
wire _prior_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26]
wire [2:0] _prior_out_T_4 = prior_c ? _prior_out_T_2 : _prior_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}]
assign prior = _prior_T ? {3'h0, _prior_out_T} : _prior_T_1 ? {2'h0, _prior_out_T_1} : _prior_T_2 ? {1'h0, _prior_out_T_4} : {_prior_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26]
wire _T_574 = io_directory_valid_0 | _new_meta_T; // @[MSHR.scala:84:7, :505:40, :539:28] |
Generate the Verilog code corresponding to the following Chisel files.
File RouteComputer.scala:
package constellation.router
import chisel3._
import chisel3.util._
import chisel3.util.experimental.decode.{TruthTable, decoder}
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import freechips.rocketchip.rocket.DecodeLogic
import constellation.channel._
import constellation.routing.{FlowRoutingBundle, FlowRoutingInfo}
import constellation.noc.{HasNoCParams}
class RouteComputerReq(implicit val p: Parameters) extends Bundle with HasNoCParams {
val src_virt_id = UInt(virtualChannelBits.W)
val flow = new FlowRoutingBundle
}
class RouteComputerResp(
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Bundle
with HasRouterOutputParams {
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
}
class RouteComputer(
val routerParams: RouterParams,
val inParams: Seq[ChannelParams],
val outParams: Seq[ChannelParams],
val ingressParams: Seq[IngressChannelParams],
val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module
with HasRouterParams
with HasRouterInputParams
with HasRouterOutputParams
with HasNoCParams {
val io = IO(new Bundle {
val req = MixedVec(allInParams.map { u => Flipped(Decoupled(new RouteComputerReq)) })
val resp = MixedVec(allInParams.map { u => Output(new RouteComputerResp(outParams, egressParams)) })
})
(io.req zip io.resp).zipWithIndex.map { case ((req, resp), i) =>
req.ready := true.B
if (outParams.size == 0) {
assert(!req.valid)
resp.vc_sel := DontCare
} else {
def toUInt(t: (Int, FlowRoutingInfo)): UInt = {
val l2 = (BigInt(t._1) << req.bits.flow.vnet_id .getWidth) | t._2.vNetId
val l3 = ( l2 << req.bits.flow.ingress_node .getWidth) | t._2.ingressNode
val l4 = ( l3 << req.bits.flow.ingress_node_id.getWidth) | t._2.ingressNodeId
val l5 = ( l4 << req.bits.flow.egress_node .getWidth) | t._2.egressNode
val l6 = ( l5 << req.bits.flow.egress_node_id .getWidth) | t._2.egressNodeId
l6.U(req.bits.getWidth.W)
}
val flow = req.bits.flow
val table = allInParams(i).possibleFlows.toSeq.distinct.map { pI =>
allInParams(i).channelRoutingInfos.map { cI =>
var row: String = "b"
(0 until nOutputs).foreach { o =>
(0 until outParams(o).nVirtualChannels).foreach { outVId =>
row = row + (if (routingRelation(cI, outParams(o).channelRoutingInfos(outVId), pI)) "1" else "0")
}
}
((cI.vc, pI), row)
}
}.flatten
val addr = req.bits.asUInt
val width = outParams.map(_.nVirtualChannels).reduce(_+_)
val decoded = if (table.size > 0) {
val truthTable = TruthTable(
table.map { e => (BitPat(toUInt(e._1)), BitPat(e._2)) },
BitPat("b" + "?" * width)
)
Reverse(decoder(addr, truthTable))
} else {
0.U(width.W)
}
var idx = 0
(0 until nAllOutputs).foreach { o =>
if (o < nOutputs) {
(0 until outParams(o).nVirtualChannels).foreach { outVId =>
resp.vc_sel(o)(outVId) := decoded(idx)
idx += 1
}
} else {
resp.vc_sel(o)(0) := false.B
}
}
}
}
}
| module RouteComputer_52( // @[RouteComputer.scala:29:7]
input [3:0] io_req_1_bits_flow_egress_node, // @[RouteComputer.scala:40:14]
input [2:0] io_req_1_bits_flow_egress_node_id, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_0, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_1, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_2, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_3, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_4, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_5, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_6, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_7, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_8, // @[RouteComputer.scala:40:14]
output io_resp_1_vc_sel_0_9 // @[RouteComputer.scala:40:14]
);
wire [1:0] _GEN = {~(io_req_1_bits_flow_egress_node_id[2]), ~(io_req_1_bits_flow_egress_node[3])}; // @[pla.scala:78:21, :98:53]
assign io_resp_1_vc_sel_0_0 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_1_vc_sel_0_1 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_1_vc_sel_0_2 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_1_vc_sel_0_3 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_1_vc_sel_0_4 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_1_vc_sel_0_5 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_1_vc_sel_0_6 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_1_vc_sel_0_7 = 1'h0; // @[RouteComputer.scala:29:7]
assign io_resp_1_vc_sel_0_8 = &_GEN; // @[pla.scala:98:{53,70}]
assign io_resp_1_vc_sel_0_9 = &_GEN; // @[pla.scala:98:{53,70}]
endmodule |
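A brief aside on the pair above (illustrative only, not part of the generated dataset): RouteComputer builds a TruthTable whose input is the packed flow bundle produced by toUInt and whose output carries one bit per output virtual channel, then feeds it to chisel3's decoder; the don't-care default lets the logic minimizer drop unreachable flow patterns, which is why RouteComputer_52 above reduces to two inverted flow bits ANDed together for vc_sel_0_8/9 and constants elsewhere. The minimal, self-contained Chisel sketch below shows the same TruthTable/decoder idiom in isolation; the module name TinyDecode, the 2-bit select, and the 4-bit one-hot output are assumptions chosen for the example, not names taken from the sources above.

import chisel3._
import chisel3.util.BitPat
import chisel3.util.experimental.decode.{TruthTable, decoder}

// Minimal sketch of the TruthTable/decoder idiom used by RouteComputer.
// All names and widths here are hypothetical.
class TinyDecode extends Module {
  val io = IO(new Bundle {
    val sel    = Input(UInt(2.W))   // stands in for the packed flow fields
    val onehot = Output(UInt(4.W))  // stands in for the per-VC vc_sel bits
  })
  // Each listed row maps one input pattern to one output pattern; anything
  // not listed falls through to the all-don't-care default, mirroring the
  // BitPat("b" + "?" * width) default in RouteComputer.
  val table = TruthTable(
    Seq(
      BitPat("b00") -> BitPat("b0001"),
      BitPat("b01") -> BitPat("b0010"),
      BitPat("b10") -> BitPat("b0100"),
      BitPat("b11") -> BitPat("b1000")),
    BitPat("b????"))
  io.onehot := decoder(io.sel, table)
}

Elaborating a module like this yields combinational logic minimized across the listed rows and the default, which is the same mechanism that produces the small &_GEN expression in RouteComputer_52.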
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
    //The monitor doesn't check for Acquire T vs Acquire B; it assumes that Acquire B implies Acquire T and only checks for Acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
    monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
      monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
    // These will be constraints for the formal verification setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
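    // Together these two constraints pin sym_source to one arbitrary but stable, legal
    // source ID for the whole trace, so the checks below effectively quantify over
    // every legal source ID at once.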
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
    monAssert (IfThen(my_resp_pend, !my_a_first_beat),
      "Request message should not be sent with a source ID for which a response message " +
      "is already pending (not received until the current cycle) for a prior request message " +
      "with the same source ID" + extra)
    assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
      "Response message should be accepted with a source ID only if a request message with the " +
      "same source ID has been accepted or is being accepted in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
      "Response message should be sent with a source ID only if a request message with the " +
      "same source ID has been accepted or is being sent in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
      "If d_valid is 1, then d_size should be the same as a_size of the corresponding request " +
      "message" + extra)
    assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
      "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " +
      "request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
  // This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
    val a_size_bus_size = edge.bundle.sizeBits + 1 // add one so that 0 is not mapped to anything (size 0 -> size 1 in the map; 0 in the map means unset)
    val a_opcode_bus_size = 3 + 1 // opcode is 3 bits, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
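    // Illustrative note on the packing scheme: with a_opcode_bus_size = 4
    // (log_a_opcode_bus_size = 2), a Get (opcode 4) in flight for source 2 is recorded
    // as (4 << 1) | 1 = 0b1001 in bits [11:8] of inflight_opcodes; a_opcode_lookup below
    // masks that slot out with size_to_numfullbits(4.U) = 0b1111 and shifts away the
    // in-flight flag bit, recovering opcode 4. The size slots in inflight_sizes work
    // the same way.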
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
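    // These maps are indexed by the A-channel opcode: Put{Full,Partial}Data expect
    // AccessAck; Get, ArithmeticData and LogicalData expect AccessAckData; Hint expects
    // HintAck; AcquireBlock/AcquirePerm expect Grant, with GrantData also legal for
    // AcquireBlock via responseMapSecondOption.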
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
    val c_size_bus_size = edge.bundle.sizeBits + 1 // add one so that 0 is not mapped to anything (size 0 -> size 1 in the map; 0 in the map means unset)
    val c_opcode_bus_size = 3 + 1 // opcode is 3 bits, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
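// For instance, PlusArg.apply below registers its default as Some(BigInt), so the
// implicit bigIntToDoctype resolves and the generated help text advertises the
// argument's value type as INT.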
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
   * initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
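// A minimal usage sketch (the names below are illustrative, not part of this file):
// inside a module, PlusArg yields a UInt driven by the +arg given at simulation time,
// so running with +trace_start=1000 would drive `traceStart` to 1000.U here.
//
//   val traceStart = PlusArg("trace_start", default = 0, docstring = "cycle to begin tracing")
//   val cycle = RegInit(0.U(32.W))
//   cycle := cycle + 1.U
//   val tracing = cycle >= traceStart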
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
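    // For example, x << 3.S shifts x left by 3 and x << (-2).S shifts x right by 2;
    // >> mirrors this, shifting right for non-negative n and left for negative n.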
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
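    // For example, with n = 10 (not a power of two): 7.U.addWrap(5.U, 10) yields 2.U
    // and 3.U.subWrap(5.U, 10) yields 8.U; when n is a power of two the wrap reduces
    // to truncating the result to log2(n) bits.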
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
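  // These "OH1" helpers manipulate trailing-ones masks rather than true one-hot values:
  // for example UIntToOH1(2.U, 4) = 0b0011, OH1ToOH(0b0011) = 0b0100, and
  // OH1ToUInt(0b0011) = 2.U. TLEdge.isAligned builds exactly this kind of mask for its
  // alignment check.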
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
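  // Worked examples on a 5-bit value: leftOR("b00100".U(5.W)) = 0b11100 (set bits
  // propagate toward the MSB), while rightOR("b00100".U(5.W)) = 0b00111 (set bits
  // propagate toward the LSB).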
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
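  // For example, groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) returns
  // Seq(1 -> List(1, 3), 0 -> List(2, 4)), keeping keys in first-encounter order,
  // whereas Seq.groupBy would produce a Map with no guaranteed iteration order.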
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
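  // For example, with 8-byte beats: a 32-byte PutFullData has numBeats = 4 and
  // numBeats1 = 3, while a Get of any size has numBeats = 1, since an A-channel Get
  // carries no data beats.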
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
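  // A worked example (assuming an 8-byte data bus): for a 32-byte PutFullData,
  // beats1 = 3 and the counter steps 0 -> 3 -> 2 -> 1 across the four beats, so
  // `first` is true on beat 0, `last` on the final beat (counter === 1), and `count`
  // reports the beat index 0, 1, 2, 3.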
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
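// In other words: writes (PutFull/PutPartial, Arithmetic, Logical), prefetch-write
// Hints, and Acquires requesting T (NtoT/BtoT) need Trunk/Tip permissions, while Gets,
// prefetch-read Hints, and NtoB Acquires are satisfied with Branch permissions; any
// other opcode/param combination leaves the result as DontCare.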
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
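// Hedged sketch (hypothetical helper, not part of the upstream API): wraps inFlight to
// expose a single "nothing outstanding" signal, which is the usual reason to pay for
// this counter; it inherits inFlight's cost, so instantiate it just as sparingly.
def quiescent(x: TLBundle): Bool = {
  val (flight, _) = inFlight(x)
  flight === 0.U
}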
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
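// Usage sketch (illustrative only): drive an A channel with a Get, assuming the
// hypothetical signals `out: TLBundle`, `src`, `addr`, `lg`, and `wantRead` exist at
// the instantiation site:
//   val (legal, get) = edge.Get(src, addr, lg)
//   out.a.valid := legal && wantRead
//   out.a.bits  := get
// The returned `legal` only reflects the fast address/size check; the require above
// guards the case where no visible manager supports Gets at all.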
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
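// Note the difference between the two Put constructors above: the full-data form
// derives the byte mask from (address, size) and requires PutFull support, while this
// masked form takes the caller's mask verbatim and requires PutPartial support.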
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_44( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input io_in_d_bits_source, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
reg [8:0] a_first_counter; // @[Edges.scala:229:27]
reg [31:0] address; // @[Monitor.scala:391:22]
reg [8:0] d_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [3:0] size_1; // @[Monitor.scala:540:22]
reg source_1; // @[Monitor.scala:541:22]
reg [3:0] sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [1:0] inflight; // @[Monitor.scala:614:27]
reg [3:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [7:0] inflight_sizes; // @[Monitor.scala:618:33]
reg [8:0] a_first_counter_1; // @[Edges.scala:229:27]
wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
reg [8:0] d_first_counter_1; // @[Edges.scala:229:27]
wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire a_set = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35]
wire _GEN = io_in_d_valid & d_first_1; // @[Monitor.scala:674:26]
wire _GEN_0 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:36:7, :673:46, :674:74]
wire _GEN_1 = _GEN & _GEN_0; // @[Monitor.scala:673:46, :674:{26,71,74}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
reg [1:0] inflight_1; // @[Monitor.scala:726:35]
reg [7:0] inflight_sizes_1; // @[Monitor.scala:728:35]
reg [8:0] d_first_counter_2; // @[Edges.scala:229:27]
wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _GEN_2 = io_in_d_valid & d_first_2 & io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:36:7, :673:46, :784:26, :788:70]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
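// visible() is true when, for every client whose sourceId range could have issued this
// source, the address falls inside at least one of that client's declared visibility
// regions; clients that cannot own the source ID are ignored.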
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B

//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
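// The multibeat legalizers follow the same pattern per channel: latch the control
// fields on the first beat of a message and check (monAssert for A/B/C, assume for D)
// that they stay constant on every subsequent beat.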
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address=== address,"'B' channel address changed with multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
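// Summary of the deprecated checker above: one bit of `inflight` per source ID is set
// on the first beat of an A request and cleared by the matching D response (ReleaseAck
// excepted), while a watchdog, reset by any A or D beat, fails the assertion once the
// plusarg limit of idle cycles with requests still outstanding is exceeded.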
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
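// e.g. size_to_numfullbits(3.U) = (1 << 3) - 1 = 0b111; it is used below both to build
// the per-source lookup masks and to clear a source's opcode/size slot on a D response.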
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
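  // Worked example of the packing scheme above (illustration, not part of the original
  // code): with sizeBits = 4, a_size_bus_size = 5 and log_a_size_bus_size = 3, so each
  // source owns an 8-bit lane of inflight_sizes at bit offset (source << 3). A 16-byte
  // Get (size = 4) in flight from source 3 stores (4 << 1) | 1 = 0b01001 at offset 24;
  // a_size_lookup shifts that lane back down, masks it, and drops the low "valid" bit
  // to recover the size when the matching 'D' beat arrives.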
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val c_opcode_bus_size = 3 + 1 // opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
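// Illustrative usage sketch (assumption, not part of the original file): DecoupledHelper
// collects the ready/valid conditions of several interfaces and fires only when all of
// them hold; fire(exclude) drops exactly one condition, which is the usual way to drive
// that interface's own handshake signal without a combinational loop. The module and
// signal names below are invented for the example.
class DecoupledHelperExample extends Module {
  val io = IO(new Bundle {
    val a   = Flipped(Decoupled(UInt(8.W)))
    val b   = Flipped(Decoupled(UInt(8.W)))
    val out = Decoupled(UInt(8.W))
  })
  val helper = DecoupledHelper(io.a.valid, io.b.valid, io.out.ready)
  io.a.ready   := helper.fire(io.a.valid)   // exclude a's own valid
  io.b.ready   := helper.fire(io.b.valid)   // exclude b's own valid
  io.out.valid := helper.fire(io.out.ready) // exclude out's own ready
  io.out.bits  := io.a.bits + io.b.bits
}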
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
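// Illustrative sketch (assumption, not part of the original file): MuxTLookup picks a
// tuple of values by comparing a key against constants, falling back to the default
// when nothing matches. The opcode encodings and field names here are invented.
class MuxTLookupExample extends Module {
  val io = IO(new Bundle {
    val op  = Input(UInt(2.W))
    val len = Output(UInt(4.W))
    val wen = Output(Bool())
  })
  val (len, wen) = MuxTLookup(io.op, (0.U(4.W), false.B), Seq(
    1.U -> (4.U(4.W), false.B),  // hypothetical "read" op: 4 bytes, no write enable
    2.U -> (8.U(4.W), true.B)))  // hypothetical "write" op: 8 bytes, write enable
  io.len := len
  io.wen := wen
}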
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
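// Illustrative sketch (assumption, not part of the original file): Str packs ASCII
// characters into a UInt, one byte per character, so hardware can emit text through a
// byte-oriented debug interface; Str(x: UInt) renders x as space-padded decimal digits.
// The port names and widths below are only for demonstration.
class StrExample extends Module {
  val io = IO(new Bundle {
    val counter = Input(UInt(8.W))
    val tag     = Output(UInt(24.W)) // three ASCII bytes
    val digits  = Output(UInt(24.W)) // up to three decimal digits of counter
  })
  io.tag    := Str("CNT")        // 'C', 'N', 'T' packed big-endian
  io.digits := Str(io.counter)   // e.g. 42 renders as " 42"
}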
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
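// Illustrative sketch (assumption, not part of the original file): Random(n) draws a
// roughly uniform index in [0, n) from a free-running 16-bit LFSR, e.g. for a
// pseudo-random replacement victim in a non-power-of-2-way structure.
class RandomExample extends Module {
  val io = IO(new Bundle {
    val victim = Output(UInt(3.W))
  })
  io.victim := Random(5) // pick one of 5 ways
}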
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
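// Illustrative sketch (assumption, not part of the original file): PopCountAtLeast(x, n)
// answers "are at least n bits of x set?" and, for n <= 2, is cheaper than comparing a
// full PopCount against a constant.
class PopCountAtLeastExample extends Module {
  val io = IO(new Bundle {
    val grants        = Input(UInt(8.W))
    val multipleGrant = Output(Bool())
  })
  io.multipleGrant := PopCountAtLeast(io.grants, 2) // two or more grants asserted
}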
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
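// Illustrative sketch (assumption, not part of the original file): MaskGen turns the low
// address bits and a log2 transfer size into a per-byte mask for one beat, exactly as the
// examples in the comment above. The 8-byte beat width here is arbitrary.
class MaskGenExample extends Module {
  val io = IO(new Bundle {
    val addr   = Input(UInt(3.W))  // address bits below the 8-byte beat boundary
    val lgSize = Input(UInt(2.W))  // log2 of the access size in bytes
    val mask   = Output(UInt(8.W)) // one bit per byte of the beat
  })
  io.mask := MaskGen(io.addr, io.lgSize, beatBytes = 8)
}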
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
  * initial block and thus accessing it from another initial block is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
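// Illustrative sketch (assumption, not part of the original file): the plus-arg names
// below are invented. PlusArg exposes +verbose_level=N as a hardware value at simulation
// time, and PlusArg.timeout kills the simulation once a counter exceeds +example_timeout.
class PlusArgExample extends Module {
  val io = IO(new Bundle {
    val verboseLevel = Output(UInt(32.W))
    val cycles       = Input(UInt(32.W))
  })
  io.verboseLevel := PlusArg("verbose_level", docstring = "Verbosity of debug printfs")
  PlusArg.timeout("example_timeout",
    docstring = "Kill the simulation after this many cycles. Off if 0.")(io.cycles)
}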
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
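  // Illustrative sketch (assumption, not part of the original file): a couple of the UInt
  // helpers above in use. addWrap keeps an index inside a 6-entry (non-power-of-2) ring,
  // and grouped splits a word into byte lanes. Port names are invented.
  class UIntHelpersExample extends Module {
    val io = IO(new Bundle {
      val ptr   = Input(UInt(3.W))  // current index, assumed < 6
      val step  = Input(UInt(3.W))  // increment, assumed < 6
      val next  = Output(UInt(3.W))
      val word  = Input(UInt(32.W))
      val byte0 = Output(UInt(8.W))
    })
    io.next  := io.ptr.addWrap(io.step, 6)
    io.byte0 := io.word.grouped(8).head
  }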
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
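  // Illustrative sketch (assumption, not part of the original file): leftOR smears every
  // set bit towards the MSB and rightOR towards the LSB in log2(width) steps, which is
  // handy for building thermometer-coded priority masks from a request vector.
  class SmearExample extends Module {
    val io = IO(new Bundle {
      val in       = Input(UInt(8.W))
      val upMask   = Output(UInt(8.W))
      val downMask = Output(UInt(8.W))
    })
    io.upMask   := leftOR(io.in)  // e.g. 0b00010100 -> 0b11111100
    io.downMask := rightOR(io.in) // e.g. 0b00010100 -> 0b00011111
  }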
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
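  // Illustrative example (assumption, not in the original file): unlike groupBy, the
  // result order follows the first appearance of each key, so generated hardware is
  // stable from run to run. For instance:
  //   groupByIntoSeq(Seq(3, 1, 4, 1, 5))(_ % 2) == Seq(1 -> Seq(3, 1, 1, 5), 0 -> Seq(4))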
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
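// Illustrative sketch (assumption, not part of the original file): adResponse above maps
// each A-channel opcode to the D-channel opcode expected in reply, which is how a monitor
// or a simple slave can pick its response type.
class AdResponseExample extends Module {
  val io = IO(new Bundle {
    val aOpcode = Input(UInt(3.W))
    val dOpcode = Output(UInt(3.W))
  })
  io.dOpcode := TLMessages.adResponse(io.aOpcode)
}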
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
 * (B)ranch: the agent is on an outwards path from the point of serialization and may hold a read-only copy.
 * (N)one: the agent holds no permissions on (and no copy of) the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
  // Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
  def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
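// Illustrative sketch (assumption, not part of the original file): the C-channel param
// carries either a shrink or a report encoding; isShrink picks out the cases where the
// sender actually gives up permissions (TtoB, TtoN, BtoN) rather than just reporting them.
class PermissionShrinkExample extends Module {
  val io = IO(new Bundle {
    val cParam  = Input(UInt(TLPermissions.cWidth.W))
    val shrinks = Output(Bool())
  })
  io.shrinks := TLPermissions.isShrink(io.cParam)
}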
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A potentially empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
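// Illustrative sketch (assumption, not part of the original file): IdRange is a half-open
// [start, end) set of source IDs; contains(x: UInt) compiles down to a shared-prefix
// compare plus two narrow bound checks rather than two full magnitude comparisons.
class IdRangeExample extends Module {
  val io = IO(new Bundle {
    val source = Input(UInt(4.W))
    val hit    = Output(Bool())
  })
  val range = IdRange(4, 8) // source IDs 4, 5, 6, 7
  io.hit := range.contains(io.source)
}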
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
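// Illustrative elaboration-time examples (assumption, not part of the original file):
//   TransferSizes(4, 64).contains(16)                      // true: 16 is a power of 2 in [4, 64]
//   TransferSizes(4, 64).intersect(TransferSizes(32, 256)) // TransferSizes(32, 64)
//   TransferSizes(4, 8).mincover(TransferSizes(64, 128))   // TransferSizes(4, 128)
// containsLg(x: UInt) performs the same membership test against a hardware log2(size).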
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask gives the bits consumed by the manager
// e.g.: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g.: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
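// Illustrative sketch (assumption, not part of the original file): an AddressSet is a
// base/mask pair, so contains(x: UInt) is a single XOR-and-compare and address decoding
// stays cheap. The 4 KiB device at 0x1000 below is an invented example.
class AddressSetExample extends Module {
  val io = IO(new Bundle {
    val addr = Input(UInt(32.W))
    val sel  = Output(Bool())
  })
  val device = AddressSet(0x1000, 0xfff) // covers 0x1000 through 0x1fff
  io.sel := device.contains(io.addr)
}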
}
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
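  // Worked example (assuming manager.beatBytes = 8, i.e. a 64-bit data bus): a
  // 32-byte PutFullData has lgSize = 5, so numBeats1 = UIntToOH1(5, maxLgSize) >> 3
  // = 3 and the burst takes 4 beats. `first` is true only on beat 0, `last` only on
  // beat 3, `done` pulses with the final fire, and `count` steps through 0,1,2,3.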
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
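  // For example (per the lookup tables above): Get and PREFETCH_READ hints never
  // need T permissions, Put and atomic opcodes always do, and AcquireBlock or
  // AcquirePerm needs T only for NtoT or BtoT growth, not for NtoB.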
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
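// Hypothetical client-side sketch (not part of the original file): building a
// single-beat, 8-byte Get with a TLEdgeOut and driving it onto the A channel.
// `edge`, `out`, `srcId`, `addr` and `wantRead` are assumed to exist in the
// surrounding client module; only the Get constructor comes from the API above.
//
//   val (legal, get) = edge.Get(fromSource = srcId, toAddress = addr, lgSize = 3.U)
//   out.a.valid := wantRead && legal
//   out.a.bits := get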
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
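// Hypothetical manager-side sketch (not part of the original file): answering a
// Get on the A channel with an AccessAckData on D. `in`, `edge` and `readData`
// are assumed to come from the surrounding manager implementation; only the
// AccessAck constructor comes from the API above.
//
//   in.d.valid := in.a.valid && in.a.bits.opcode === TLMessages.Get
//   in.a.ready := in.d.ready
//   in.d.bits := edge.AccessAck(in.a.bits, readData)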
| module TLMonitor_65( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [1:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [11:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [20:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [11:0] io_in_d_bits_source // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [1:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [11:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [20:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [11:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data = 64'h0; // @[Monitor.scala:36:7]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire _source_ok_T = 1'h0; // @[Parameters.scala:54:10]
wire _source_ok_T_6 = 1'h0; // @[Parameters.scala:54:10]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire a_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire a_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire a_first_count = 1'h0; // @[Edges.scala:234:25]
wire d_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count = 1'h0; // @[Edges.scala:234:25]
wire a_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59]
wire a_first_beats1_1 = 1'h0; // @[Edges.scala:221:14]
wire a_first_count_1 = 1'h0; // @[Edges.scala:234:25]
wire d_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1_1 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count_1 = 1'h0; // @[Edges.scala:234:25]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire c_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_first_count_T = 1'h0; // @[Edges.scala:234:27]
wire c_first_count = 1'h0; // @[Edges.scala:234:25]
wire _c_first_counter_T = 1'h0; // @[Edges.scala:236:21]
wire d_first_beats1_decode_2 = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1_2 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count_2 = 1'h0; // @[Edges.scala:234:25]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire _source_ok_T_7 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_8 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:54:67]
wire _a_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire a_first_last = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last = 1'h1; // @[Edges.scala:232:33]
wire _a_first_last_T_3 = 1'h1; // @[Edges.scala:232:43]
wire a_first_last_1 = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_3 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last_1 = 1'h1; // @[Edges.scala:232:33]
wire c_first_counter1 = 1'h1; // @[Edges.scala:230:28]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_5 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last_2 = 1'h1; // @[Edges.scala:232:33]
wire [1:0] _c_first_counter1_T = 2'h3; // @[Edges.scala:230:28]
wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7]
wire [1:0] _c_first_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_first_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_first_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_first_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_set_wo_ready_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_set_wo_ready_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_opcodes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_opcodes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_sizes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_sizes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_opcodes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_opcodes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_sizes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_sizes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_probe_ack_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_probe_ack_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_probe_ack_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_probe_ack_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_4_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_5_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_first_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_first_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_wo_ready_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_wo_ready_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_4_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_5_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_first_WIRE_bits_source = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_first_WIRE_1_bits_source = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_first_WIRE_2_bits_source = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_first_WIRE_3_bits_source = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_set_wo_ready_WIRE_bits_source = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_set_wo_ready_WIRE_1_bits_source = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_set_WIRE_bits_source = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_set_WIRE_1_bits_source = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_opcodes_set_interm_WIRE_bits_source = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_opcodes_set_interm_WIRE_1_bits_source = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_sizes_set_interm_WIRE_bits_source = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_sizes_set_interm_WIRE_1_bits_source = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_opcodes_set_WIRE_bits_source = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_opcodes_set_WIRE_1_bits_source = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_sizes_set_WIRE_bits_source = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_sizes_set_WIRE_1_bits_source = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_probe_ack_WIRE_bits_source = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_probe_ack_WIRE_1_bits_source = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_probe_ack_WIRE_2_bits_source = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_probe_ack_WIRE_3_bits_source = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _same_cycle_resp_WIRE_bits_source = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _same_cycle_resp_WIRE_1_bits_source = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _same_cycle_resp_WIRE_2_bits_source = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _same_cycle_resp_WIRE_3_bits_source = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _same_cycle_resp_WIRE_4_bits_source = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _same_cycle_resp_WIRE_5_bits_source = 12'h0; // @[Bundles.scala:265:61]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_beats1_decode_T_2 = 3'h0; // @[package.scala:243:46]
wire [2:0] c_sizes_set_interm = 3'h0; // @[Monitor.scala:755:40]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_T = 3'h0; // @[Monitor.scala:766:51]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [32769:0] _c_sizes_set_T_1 = 32770'h0; // @[Monitor.scala:768:52]
wire [14:0] _c_opcodes_set_T = 15'h0; // @[Monitor.scala:767:79]
wire [14:0] _c_sizes_set_T = 15'h0; // @[Monitor.scala:768:77]
wire [32770:0] _c_opcodes_set_T_1 = 32771'h0; // @[Monitor.scala:767:54]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] _c_sizes_set_interm_T_1 = 3'h1; // @[Monitor.scala:766:59]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [4095:0] _c_set_wo_ready_T = 4096'h1; // @[OneHot.scala:58:35]
wire [4095:0] _c_set_T = 4096'h1; // @[OneHot.scala:58:35]
wire [8255:0] c_opcodes_set = 8256'h0; // @[Monitor.scala:740:34]
wire [8255:0] c_sizes_set = 8256'h0; // @[Monitor.scala:741:34]
wire [2063:0] c_set = 2064'h0; // @[Monitor.scala:738:34]
wire [2063:0] c_set_wo_ready = 2064'h0; // @[Monitor.scala:739:34]
wire [2:0] _c_first_beats1_decode_T_1 = 3'h7; // @[package.scala:243:76]
wire [5:0] _c_first_beats1_decode_T = 6'h7; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [11:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [11:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [11:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [11:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [11:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [11:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [11:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [11:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [11:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [11:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [11:0] _source_ok_uncommonBits_T_1 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [11:0] source_ok_uncommonBits = _source_ok_uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_4 = source_ok_uncommonBits < 12'h810; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_5 = _source_ok_T_4; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_0 = _source_ok_T_5; // @[Parameters.scala:1138:31]
wire [5:0] _GEN = 6'h7 << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [5:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [5:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [2:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [20:0] _is_aligned_T = {18'h0, io_in_a_bits_address_0[2:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 21'h0; // @[Edges.scala:21:{16,24}]
wire [2:0] _mask_sizeOH_T = {1'h0, io_in_a_bits_size_0}; // @[Misc.scala:202:34]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = &io_in_a_bits_size_0; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [11:0] uncommonBits = _uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire [11:0] uncommonBits_1 = _uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire [11:0] uncommonBits_2 = _uncommonBits_T_2; // @[Parameters.scala:52:{29,56}]
wire [11:0] uncommonBits_3 = _uncommonBits_T_3; // @[Parameters.scala:52:{29,56}]
wire [11:0] uncommonBits_4 = _uncommonBits_T_4; // @[Parameters.scala:52:{29,56}]
wire [11:0] uncommonBits_5 = _uncommonBits_T_5; // @[Parameters.scala:52:{29,56}]
wire [11:0] uncommonBits_6 = _uncommonBits_T_6; // @[Parameters.scala:52:{29,56}]
wire [11:0] uncommonBits_7 = _uncommonBits_T_7; // @[Parameters.scala:52:{29,56}]
wire [11:0] uncommonBits_8 = _uncommonBits_T_8; // @[Parameters.scala:52:{29,56}]
wire [11:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_10 = source_ok_uncommonBits_1 < 12'h810; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_11 = _source_ok_T_10; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_1_0 = _source_ok_T_11; // @[Parameters.scala:1138:31]
wire _T_665 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_665; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_665; // @[Decoupled.scala:51:35]
wire a_first_done = _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
reg a_first_counter; // @[Edges.scala:229:27]
wire _a_first_last_T = a_first_counter; // @[Edges.scala:229:27, :232:25]
wire [1:0] _a_first_counter1_T = {1'h0, a_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire a_first_counter1 = _a_first_counter1_T[0]; // @[Edges.scala:230:28]
wire a_first = ~a_first_counter; // @[Edges.scala:229:27, :231:25]
wire _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire _a_first_counter_T = ~a_first & a_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [1:0] size; // @[Monitor.scala:389:22]
reg [11:0] source; // @[Monitor.scala:390:22]
reg [20:0] address; // @[Monitor.scala:391:22]
wire _T_733 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_733; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_733; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_733; // @[Decoupled.scala:51:35]
wire d_first_done = _d_first_T; // @[Decoupled.scala:51:35]
wire [5:0] _GEN_0 = 6'h7 << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [2:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
reg d_first_counter; // @[Edges.scala:229:27]
wire _d_first_last_T = d_first_counter; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T = {1'h0, d_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1 = _d_first_counter1_T[0]; // @[Edges.scala:230:28]
wire d_first = ~d_first_counter; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T = ~d_first & d_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] size_1; // @[Monitor.scala:540:22]
reg [11:0] source_1; // @[Monitor.scala:541:22]
reg [2063:0] inflight; // @[Monitor.scala:614:27]
reg [8255:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [8255:0] inflight_sizes; // @[Monitor.scala:618:33]
wire a_first_done_1 = _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
reg a_first_counter_1; // @[Edges.scala:229:27]
wire _a_first_last_T_2 = a_first_counter_1; // @[Edges.scala:229:27, :232:25]
wire [1:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire a_first_counter1_1 = _a_first_counter1_T_1[0]; // @[Edges.scala:230:28]
wire a_first_1 = ~a_first_counter_1; // @[Edges.scala:229:27, :231:25]
wire _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire _a_first_counter_T_1 = ~a_first_1 & a_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21]
wire d_first_done_1 = _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
reg d_first_counter_1; // @[Edges.scala:229:27]
wire _d_first_last_T_2 = d_first_counter_1; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1_1 = _d_first_counter1_T_1[0]; // @[Edges.scala:230:28]
wire d_first_1 = ~d_first_counter_1; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T_1 = ~d_first_1 & d_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21]
wire [2063:0] a_set; // @[Monitor.scala:626:34]
wire [2063:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [8255:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [8255:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [14:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [14:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [14:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [14:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [14:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [14:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [14:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [14:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [14:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [8255:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [8255:0] _a_opcode_lookup_T_6 = {8252'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [8255:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[8255:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [8255:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [8255:0] _a_size_lookup_T_6 = {8252'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [8255:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[8255:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [2:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [4095:0] _GEN_2 = 4096'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [4095:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [4095:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[2063:0] : 2064'h0; // @[OneHot.scala:58:35]
wire _T_598 = _T_665 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_598 ? _a_set_T[2063:0] : 2064'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_598 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [2:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [2:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[2:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_598 ? _a_sizes_set_interm_T_1 : 3'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [14:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [14:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [14:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [32770:0] _a_opcodes_set_T_1 = {32767'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_598 ? _a_opcodes_set_T_1[8255:0] : 8256'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [32769:0] _a_sizes_set_T_1 = {32767'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_598 ? _a_sizes_set_T_1[8255:0] : 8256'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [2063:0] d_clr; // @[Monitor.scala:664:34]
wire [2063:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [8255:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [8255:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_644 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [4095:0] _GEN_5 = 4096'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [4095:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [4095:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [4095:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [4095:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_644 & ~d_release_ack ? _d_clr_wo_ready_T[2063:0] : 2064'h0; // @[OneHot.scala:58:35]
wire _T_613 = _T_733 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_613 ? _d_clr_T[2063:0] : 2064'h0; // @[OneHot.scala:58:35]
wire [32782:0] _d_opcodes_clr_T_5 = 32783'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_613 ? _d_opcodes_clr_T_5[8255:0] : 8256'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [32782:0] _d_sizes_clr_T_5 = 32783'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_613 ? _d_sizes_clr_T_5[8255:0] : 8256'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [2063:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [2063:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [2063:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [8255:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [8255:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [8255:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [8255:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [8255:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [8255:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [2063:0] inflight_1; // @[Monitor.scala:726:35]
wire [2063:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [8255:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [8255:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [8255:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [8255:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire d_first_done_2 = _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
reg d_first_counter_2; // @[Edges.scala:229:27]
wire _d_first_last_T_4 = d_first_counter_2; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1_2 = _d_first_counter1_T_2[0]; // @[Edges.scala:230:28]
wire d_first_2 = ~d_first_counter_2; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T_2 = ~d_first_2 & d_first_counter1_2; // @[Edges.scala:230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [8255:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [8255:0] _c_opcode_lookup_T_6 = {8252'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [8255:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[8255:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [8255:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [8255:0] _c_size_lookup_T_6 = {8252'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [8255:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[8255:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [2063:0] d_clr_1; // @[Monitor.scala:774:34]
wire [2063:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [8255:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [8255:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_709 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_709 & d_release_ack_1 ? _d_clr_wo_ready_T_1[2063:0] : 2064'h0; // @[OneHot.scala:58:35]
wire _T_691 = _T_733 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_691 ? _d_clr_T_1[2063:0] : 2064'h0; // @[OneHot.scala:58:35]
wire [32782:0] _d_opcodes_clr_T_11 = 32783'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_691 ? _d_opcodes_clr_T_11[8255:0] : 8256'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [32782:0] _d_sizes_clr_T_11 = 32783'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_691 ? _d_sizes_clr_T_11[8255:0] : 8256'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 12'h0; // @[Monitor.scala:36:7, :795:113]
wire [2063:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [2063:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [8255:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [8255:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [8255:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [8255:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File util.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v4.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v4.common.{MicroOp}
import boom.v4.exu.{BrUpdateInfo}
/**
 * Object to XOR fold an input register of fullLength into a compressedLength.
*/
object Fold
{
def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
val clen = compressedLength
val hlen = fullLength
if (hlen <= clen) {
input
} else {
var res = 0.U(clen.W)
var remaining = input.asUInt
for (i <- 0 to hlen-1 by clen) {
val len = if (i + clen > hlen ) (hlen - i) else clen
require(len > 0)
res = res(clen-1,0) ^ remaining(len-1,0)
remaining = remaining >> len.U
}
res
}
}
}
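/**
 * Illustrative usage sketch (not part of the original BOOM sources): folding a
 * 12-bit history register down to 4 bits XORs its three nibbles together, i.e.
 * the result equals hist(3,0) ^ hist(7,4) ^ hist(11,8).
 */
object FoldExample
{
  def foldHistory(hist: UInt): UInt = Fold(hist, 4, 12)
}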
/**
* Object to check if MicroOp was killed due to a branch mispredict.
* Uses "Fast" branch masks
*/
object IsKilledByBranch
{
def apply(brupdate: BrUpdateInfo, flush: Bool, uop: MicroOp): Bool = {
return apply(brupdate, flush, uop.br_mask)
}
def apply(brupdate: BrUpdateInfo, flush: Bool, uop_mask: UInt): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop_mask) || flush
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: T): Bool = {
return apply(brupdate, flush, bundle.uop)
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Bool = {
return apply(brupdate, flush, bundle.bits)
}
}
/**
 * Object to return a new MicroOp with an updated BR mask, given a MicroOp and
 * the branch-resolution update info.
*/
object GetNewUopAndBrMask
{
def apply(uop: MicroOp, brupdate: BrUpdateInfo)
(implicit p: Parameters): MicroOp = {
val newuop = WireInit(uop)
newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
newuop
}
}
/**
 * Object to return an updated BR mask, given the branch-resolution info and either a MicroOp or an old BR mask.
*/
object GetNewBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = {
return uop.br_mask & ~brupdate.b1.resolve_mask
}
def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = {
return br_mask & ~brupdate.b1.resolve_mask
}
}
object UpdateBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.br_mask := GetNewBrMask(brupdate, uop)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
val out = WireInit(bundle)
out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Valid[T] = {
val out = WireInit(bundle)
out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
out.valid := bundle.valid && !IsKilledByBranch(brupdate, flush, bundle.bits.uop.br_mask)
out
}
}
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
def apply(reg_val: UInt, new_bit: Bool): UInt = {
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
val carry = csr(clen-1)
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapAdd
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, amt: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + amt)(log2Ceil(n)-1,0)
} else {
val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
Mux(sum >= n.U,
sum - n.U,
sum)
}
}
}
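/**
 * Illustrative usage sketch (not part of the original BOOM sources): advancing a
 * pointer through a 12-entry (non power-of-2) circular buffer; for example
 * WrapAdd(10.U, 3.U, 12) evaluates to 1.U.
 */
object WrapAddExample
{
  def bumpPointer(ptr: UInt, amt: UInt): UInt = WrapAdd(ptr, amt, 12)
}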
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapSub
{
// "n" is the number of increments, so we wrap to n-1.
def apply(value: UInt, amt: Int, n: Int): UInt = {
if (isPow2(n)) {
(value - amt.U)(log2Ceil(n)-1,0)
} else {
val v = Cat(0.U(1.W), value)
val b = Cat(0.U(1.W), amt.U)
Mux(value >= amt.U,
value - amt.U,
n.U - amt.U + value)
}
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === (n-1).U)
Mux(wrap, 0.U, value + 1.U)
}
}
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value - 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === 0.U)
Mux(wrap, (n-1).U, value - 1.U)
}
}
}
/**
* Object to mask off lower bits of a PC to align to a "b"
* Byte boundary.
*/
object AlignPCToBoundary
{
def apply(pc: UInt, b: Int): UInt = {
    // Invert for the scenario where the pc is wider than b
    //   (a direct mask would otherwise clear all bits above size(b)).
~(~pc | (b-1).U)
}
}
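/**
 * Illustrative usage sketch (not part of the original BOOM sources): aligning a PC
 * down to a 64-byte boundary clears pc(5,0) while preserving the upper bits, e.g.
 * 0x1234 becomes 0x1200.
 */
object AlignPCExample
{
  def alignToFetchLine(pc: UInt): UInt = AlignPCToBoundary(pc, 64)
}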
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
def apply(signal: UInt): UInt = {
val w = signal.getWidth
val out = Cat(signal(w-2,0), signal(w-1))
return out
}
}
/**
 * Object to sign-extend (sext) a value to a particular length.
*/
object Sext
{
def apply(x: UInt, length: Int): UInt = {
if (x.getWidth == length) return x
else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x)
}
}
/**
 * Object to translate from BOOM's special "packed immediate" to a 32b signed immediate.
 * Asking for a U-type gives the immediate already shifted up by 12 bits.
*/
object ImmGen
{
import boom.v4.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U, IS_N}
def apply(i: UInt, isel: UInt): UInt = {
val ip = Mux(isel === IS_N, 0.U(LONGEST_IMM_SZ.W), i)
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0)
}
}
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
 * for a debug assert (nowhere else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
val is_br = (inst(6,0) === "b1100011".U)
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
 * Object to return the position of the lowest set bit at or after the head,
 * wrapping around to the lowest set bit overall if none is set past the head.
*/
object AgePriorityEncoder
{
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
}
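/**
 * Illustrative usage sketch (not part of the original BOOM sources): with
 * valids = Seq(true.B, false.B, true.B, false.B) and head = 2.U this picks entry 2
 * (the lowest valid at or after the head); with head = 3.U nothing valid remains
 * at or after the head, so the search wraps around and entry 0 is chosen.
 */
object AgePriorityEncoderExample
{
  def pickOldestValid(valids: Seq[Bool], head: UInt): UInt = AgePriorityEncoder(valids, head)
}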
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
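/**
 * Illustrative worked example (not part of the original BOOM sources): with
 * head = 5.U marking the oldest entry, index 6 was allocated before the queue
 * wrapped while index 2 was allocated after it, so IsOlder(6.U, 2.U, 5.U)
 * evaluates to true.B.
 */
object IsOlderExample
{
  def firstIsOlder(i0: UInt, i1: UInt, head: UInt): Bool = IsOlder(i0, i1, head)
}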
object IsYoungerMask
{
def apply(i: UInt, head: UInt, n: Integer): UInt = {
val hi_mask = ~MaskLower(UIntToOH(i)(n-1,0))
val lo_mask = ~MaskUpper(UIntToOH(head)(n-1,0))
Mux(i < head, hi_mask & lo_mask, hi_mask | lo_mask)(n-1,0)
}
}
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => in >> i.U).reduce(_|_)
}
}
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_)
}
}
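/**
 * Illustrative worked example (not part of the original BOOM sources): for a
 * one-hot pointer in = "b00100".U, MaskLower(in) yields "b00111".U (bits at or
 * below the pointer) and MaskUpper(in) yields "b11100".U (bits at or above it).
 */
object MaskExample
{
  def atOrBelow(oh: UInt): UInt = MaskLower(oh)
  def atOrAbove(oh: UInt): UInt = MaskUpper(oh)
}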
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
val n = in(0).size
VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
}
}
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
var mask = in
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
}
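/**
 * Illustrative worked example (not part of the original BOOM sources): for
 * in = "b1011".U and n = 2 the result is sels(0) = "b0001".U and
 * sels(1) = "b0010".U, i.e. the two lowest-indexed requesters each receive a
 * one-hot grant.
 */
object SelectFirstNExample
{
  def grantTwo(requests: UInt): Vec[UInt] = SelectFirstN(requests, 2)
}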
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
io.out <> io.in
} else {
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
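/**
 * Illustrative usage sketch (not part of the original BOOM sources): funnel four
 * optional byte streams down to at most two outputs, forwarding the first (up to)
 * two valid inputs in order.
 */
class CompactorExample extends Module
{
  val io = IO(new Bundle {
    val in  = Vec(4, Flipped(DecoupledIO(UInt(8.W))))
    val out = Vec(2, DecoupledIO(UInt(8.W)))
  })
  val compactor = Module(new Compactor(4, 2, UInt(8.W)))
  compactor.io.in <> io.in
  io.out <> compactor.io.out
}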
/**
* Create a queue that can be killed with a branch kill signal.
 * Assumption: enq.valid is only high if the uop has not already been killed by a branch (so IsKilled is not checked on io.enq).
*/
class BranchKillableQueue[T <: boom.v4.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v4.common.MicroOp => Bool = u => true.B, fastDeq: Boolean = false)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
if (fastDeq && entries > 1) {
// Pipeline dequeue selection so the mux gets an entire cycle
val main = Module(new BranchKillableQueue(gen, entries-1, flush_fn, false))
val out_reg = Reg(gen)
val out_valid = RegInit(false.B)
val out_uop = Reg(new MicroOp)
main.io.enq <> io.enq
main.io.brupdate := io.brupdate
main.io.flush := io.flush
io.empty := main.io.empty && !out_valid
io.count := main.io.count + out_valid
io.deq.valid := out_valid
io.deq.bits := out_reg
io.deq.bits.uop := out_uop
out_uop := UpdateBrMask(io.brupdate, out_uop)
out_valid := out_valid && !IsKilledByBranch(io.brupdate, false.B, out_uop) && !(io.flush && flush_fn(out_uop))
main.io.deq.ready := false.B
when (io.deq.fire || !out_valid) {
out_valid := main.io.deq.valid && !IsKilledByBranch(io.brupdate, false.B, main.io.deq.bits.uop) && !(io.flush && flush_fn(main.io.deq.bits.uop))
out_reg := main.io.deq.bits
out_uop := UpdateBrMask(io.brupdate, main.io.deq.bits.uop)
main.io.deq.ready := true.B
}
} else {
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireInit(io.enq.fire && !IsKilledByBranch(io.brupdate, false.B, io.enq.bits.uop) && !(io.flush && flush_fn(io.enq.bits.uop)))
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, false.B, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value)
io.deq.bits := out
val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
}
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
/**
* Take in a Chisel Bool and convert it into a Str
* based on the Chars given
*
* @param c_bool Chisel Bool
* @param trueChar Scala Char if bool is true
* @param falseChar Scala Char if bool is false
* @return UInt ASCII Char for "trueChar" or "falseChar"
*/
def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
Mux(c_bool, Str(trueChar), Str(falseChar))
}
}
object CfiTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param cfi_type specific cfi type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(cfi_type: UInt) = {
val strings = Seq("----", "BR ", "JAL ", "JALR")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(cfi_type)
}
}
object BpdTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(bpd_type)
}
}
object RobTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(rob_type)
}
}
object XRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(xreg)
}
}
object FPRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(fpreg)
}
}
object BoomCoreStringPrefix
{
/**
* Add prefix to BOOM strings (currently only adds the hartId)
*
* @param strs list of strings
* @return String combining the list with the prefix per line
*/
def apply(strs: String*)(implicit p: Parameters) = {
val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
strs.map(str => prefix + str + "\n").mkString("")
}
}
class BranchKillablePipeline[T <: boom.v4.common.HasBoomUOP](gen: T, stages: Int)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val req = Input(Valid(gen))
val flush = Input(Bool())
val brupdate = Input(new BrUpdateInfo)
val resp = Output(Vec(stages, Valid(gen)))
})
require(stages > 0)
val uops = Reg(Vec(stages, Valid(gen)))
uops(0).valid := io.req.valid && !IsKilledByBranch(io.brupdate, io.flush, io.req.bits)
uops(0).bits := UpdateBrMask(io.brupdate, io.req.bits)
for (i <- 1 until stages) {
uops(i).valid := uops(i-1).valid && !IsKilledByBranch(io.brupdate, io.flush, uops(i-1).bits)
uops(i).bits := UpdateBrMask(io.brupdate, uops(i-1).bits)
}
for (i <- 0 until stages) { when (reset.asBool) { uops(i).valid := false.B } }
io.resp := uops
}
File issue-slot.scala:
//******************************************************************************
// Copyright (c) 2015 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// RISCV Processor Issue Slot Logic
//--------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// Note: stores (and AMOs) are "broken down" into 2 uops, but stored within a single issue-slot.
// TODO XXX make a separate issueSlot for MemoryIssueSlots, and only they break apart stores.
// TODO Disable ldspec for FP queue.
package boom.v4.exu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import boom.v4.common._
import boom.v4.util._
class IssueSlotIO(val numWakeupPorts: Int)(implicit p: Parameters) extends BoomBundle
{
val valid = Output(Bool())
  val will_be_valid = Output(Bool()) // TODO code review, do we need this signal so explicitly?
val request = Output(Bool())
val grant = Input(Bool())
val iss_uop = Output(new MicroOp())
val in_uop = Input(Valid(new MicroOp())) // if valid, this WILL overwrite an entry!
val out_uop = Output(new MicroOp())
val brupdate = Input(new BrUpdateInfo())
val kill = Input(Bool()) // pipeline flush
val clear = Input(Bool()) // entry being moved elsewhere (not mutually exclusive with grant)
val squash_grant = Input(Bool())
val wakeup_ports = Flipped(Vec(numWakeupPorts, Valid(new Wakeup)))
val pred_wakeup_port = Flipped(Valid(UInt(log2Ceil(ftqSz).W)))
val child_rebusys = Input(UInt(aluWidth.W))
}
class IssueSlot(val numWakeupPorts: Int, val isMem: Boolean, val isFp: Boolean)(implicit p: Parameters)
extends BoomModule
{
val io = IO(new IssueSlotIO(numWakeupPorts))
val slot_valid = RegInit(false.B)
val slot_uop = Reg(new MicroOp())
val next_valid = WireInit(slot_valid)
val next_uop = WireInit(UpdateBrMask(io.brupdate, slot_uop))
val killed = IsKilledByBranch(io.brupdate, io.kill, slot_uop)
io.valid := slot_valid
io.out_uop := next_uop
io.will_be_valid := next_valid && !killed
when (io.kill) {
slot_valid := false.B
} .elsewhen (io.in_uop.valid) {
slot_valid := true.B
} .elsewhen (io.clear) {
slot_valid := false.B
} .otherwise {
slot_valid := next_valid && !killed
}
when (io.in_uop.valid) {
slot_uop := io.in_uop.bits
assert (!slot_valid || io.clear || io.kill)
} .otherwise {
slot_uop := next_uop
}
// Wakeups
next_uop.iw_p1_bypass_hint := false.B
next_uop.iw_p2_bypass_hint := false.B
next_uop.iw_p3_bypass_hint := false.B
next_uop.iw_p1_speculative_child := 0.U
next_uop.iw_p2_speculative_child := 0.U
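  // Each wakeup port is compared against prs1/prs2/prs3 below. A valid match
  // clears that operand's busy bit and records the port's bypass hint and
  // speculative-child mask; a subsequent "rebusy" on the same operand (or a
  // child_rebusys hit on the recorded speculative child) re-marks an integer
  // operand as busy.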
val rebusied_prs1 = WireInit(false.B)
val rebusied_prs2 = WireInit(false.B)
val rebusied = rebusied_prs1 || rebusied_prs2
val prs1_matches = io.wakeup_ports.map { w => w.bits.uop.pdst === slot_uop.prs1 }
val prs2_matches = io.wakeup_ports.map { w => w.bits.uop.pdst === slot_uop.prs2 }
val prs3_matches = io.wakeup_ports.map { w => w.bits.uop.pdst === slot_uop.prs3 }
val prs1_wakeups = (io.wakeup_ports zip prs1_matches).map { case (w,m) => w.valid && m }
val prs2_wakeups = (io.wakeup_ports zip prs2_matches).map { case (w,m) => w.valid && m }
val prs3_wakeups = (io.wakeup_ports zip prs3_matches).map { case (w,m) => w.valid && m }
val prs1_rebusys = (io.wakeup_ports zip prs1_matches).map { case (w,m) => w.bits.rebusy && m }
val prs2_rebusys = (io.wakeup_ports zip prs2_matches).map { case (w,m) => w.bits.rebusy && m }
val bypassables = io.wakeup_ports.map { w => w.bits.bypassable }
val speculative_masks = io.wakeup_ports.map { w => w.bits.speculative_mask }
when (prs1_wakeups.reduce(_||_)) {
next_uop.prs1_busy := false.B
next_uop.iw_p1_speculative_child := Mux1H(prs1_wakeups, speculative_masks)
next_uop.iw_p1_bypass_hint := Mux1H(prs1_wakeups, bypassables)
}
when ((prs1_rebusys.reduce(_||_) || ((io.child_rebusys & slot_uop.iw_p1_speculative_child) =/= 0.U)) &&
slot_uop.lrs1_rtype === RT_FIX) {
next_uop.prs1_busy := true.B
rebusied_prs1 := true.B
}
when (prs2_wakeups.reduce(_||_)) {
next_uop.prs2_busy := false.B
next_uop.iw_p2_speculative_child := Mux1H(prs2_wakeups, speculative_masks)
next_uop.iw_p2_bypass_hint := Mux1H(prs2_wakeups, bypassables)
}
when ((prs2_rebusys.reduce(_||_) || ((io.child_rebusys & slot_uop.iw_p2_speculative_child) =/= 0.U)) &&
slot_uop.lrs2_rtype === RT_FIX) {
next_uop.prs2_busy := true.B
rebusied_prs2 := true.B
}
when (prs3_wakeups.reduce(_||_)) {
next_uop.prs3_busy := false.B
next_uop.iw_p3_bypass_hint := Mux1H(prs3_wakeups, bypassables)
}
when (io.pred_wakeup_port.valid && io.pred_wakeup_port.bits === slot_uop.ppred) {
next_uop.ppred_busy := false.B
}
val iss_ready = !slot_uop.prs1_busy && !slot_uop.prs2_busy && !(slot_uop.ppred_busy && enableSFBOpt.B) && !(slot_uop.prs3_busy && isFp.B)
val agen_ready = (slot_uop.fu_code(FC_AGEN) && !slot_uop.prs1_busy && !(slot_uop.ppred_busy && enableSFBOpt.B) && isMem.B)
val dgen_ready = (slot_uop.fu_code(FC_DGEN) && !slot_uop.prs2_busy && !(slot_uop.ppred_busy && enableSFBOpt.B) && isMem.B)
io.request := slot_valid && !slot_uop.iw_issued && (
iss_ready || agen_ready || dgen_ready
)
io.iss_uop := slot_uop
// Update state for current micro-op based on grant
next_uop.iw_issued := false.B
next_uop.iw_issued_partial_agen := false.B
next_uop.iw_issued_partial_dgen := false.B
when (io.grant && !io.squash_grant) {
next_uop.iw_issued := true.B
}
if (isMem) {
when (slot_uop.fu_code(FC_AGEN) && slot_uop.fu_code(FC_DGEN)) {
when (agen_ready) {
// Issue the AGEN, next slot entry is a DGEN
when (io.grant && !io.squash_grant) {
next_uop.iw_issued_partial_agen := true.B
}
io.iss_uop.fu_code(FC_AGEN) := true.B
io.iss_uop.fu_code(FC_DGEN) := false.B
} .otherwise {
// Issue the DGEN, next slot entry is the AGEN
when (io.grant && !io.squash_grant) {
next_uop.iw_issued_partial_dgen := true.B
}
io.iss_uop.fu_code(FC_AGEN) := false.B
io.iss_uop.fu_code(FC_DGEN) := true.B
io.iss_uop.imm_sel := IS_N
io.iss_uop.prs1 := slot_uop.prs2
io.iss_uop.lrs1_rtype := slot_uop.lrs2_rtype
io.iss_uop.iw_p1_bypass_hint := slot_uop.iw_p2_bypass_hint
}
} .elsewhen (slot_uop.fu_code(FC_DGEN)) {
io.iss_uop.imm_sel := IS_N
io.iss_uop.prs1 := slot_uop.prs2
io.iss_uop.lrs1_rtype := slot_uop.lrs2_rtype
io.iss_uop.iw_p1_bypass_hint := slot_uop.iw_p2_bypass_hint
}
io.iss_uop.lrs2_rtype := RT_X
io.iss_uop.prs2 := io.iss_uop.prs1 // helps with DCE
}
when (slot_valid && slot_uop.iw_issued) {
next_valid := rebusied
if (isMem) {
when (slot_uop.iw_issued_partial_agen) {
next_valid := true.B
when (!rebusied_prs1) {
next_uop.fu_code(FC_AGEN) := false.B
next_uop.fu_code(FC_DGEN) := true.B
}
} .elsewhen (slot_uop.iw_issued_partial_dgen) {
next_valid := true.B
when (!rebusied_prs2) {
next_uop.fu_code(FC_AGEN) := true.B
next_uop.fu_code(FC_DGEN) := false.B
}
}
}
}
}
| module IssueSlot_31( // @[issue-slot.scala:49:7]
input clock, // @[issue-slot.scala:49:7]
input reset, // @[issue-slot.scala:49:7]
output io_valid, // @[issue-slot.scala:52:14]
output io_will_be_valid, // @[issue-slot.scala:52:14]
output io_request, // @[issue-slot.scala:52:14]
input io_grant, // @[issue-slot.scala:52:14]
output [31:0] io_iss_uop_inst, // @[issue-slot.scala:52:14]
output [31:0] io_iss_uop_debug_inst, // @[issue-slot.scala:52:14]
output io_iss_uop_is_rvc, // @[issue-slot.scala:52:14]
output [39:0] io_iss_uop_debug_pc, // @[issue-slot.scala:52:14]
output io_iss_uop_iq_type_0, // @[issue-slot.scala:52:14]
output io_iss_uop_iq_type_1, // @[issue-slot.scala:52:14]
output io_iss_uop_iq_type_2, // @[issue-slot.scala:52:14]
output io_iss_uop_iq_type_3, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_0, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_1, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_2, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_3, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_4, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_5, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_6, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_7, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_8, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_9, // @[issue-slot.scala:52:14]
output io_iss_uop_iw_issued, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
output io_iss_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
output io_iss_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
output io_iss_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_dis_col_sel, // @[issue-slot.scala:52:14]
output [11:0] io_iss_uop_br_mask, // @[issue-slot.scala:52:14]
output [3:0] io_iss_uop_br_tag, // @[issue-slot.scala:52:14]
output [3:0] io_iss_uop_br_type, // @[issue-slot.scala:52:14]
output io_iss_uop_is_sfb, // @[issue-slot.scala:52:14]
output io_iss_uop_is_fence, // @[issue-slot.scala:52:14]
output io_iss_uop_is_fencei, // @[issue-slot.scala:52:14]
output io_iss_uop_is_sfence, // @[issue-slot.scala:52:14]
output io_iss_uop_is_amo, // @[issue-slot.scala:52:14]
output io_iss_uop_is_eret, // @[issue-slot.scala:52:14]
output io_iss_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
output io_iss_uop_is_rocc, // @[issue-slot.scala:52:14]
output io_iss_uop_is_mov, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_ftq_idx, // @[issue-slot.scala:52:14]
output io_iss_uop_edge_inst, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_pc_lob, // @[issue-slot.scala:52:14]
output io_iss_uop_taken, // @[issue-slot.scala:52:14]
output io_iss_uop_imm_rename, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_imm_sel, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_pimm, // @[issue-slot.scala:52:14]
output [19:0] io_iss_uop_imm_packed, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_op1_sel, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_op2_sel, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_rob_idx, // @[issue-slot.scala:52:14]
output [3:0] io_iss_uop_ldq_idx, // @[issue-slot.scala:52:14]
output [3:0] io_iss_uop_stq_idx, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_rxq_idx, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_pdst, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_prs1, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_prs2, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_prs3, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_ppred, // @[issue-slot.scala:52:14]
output io_iss_uop_prs1_busy, // @[issue-slot.scala:52:14]
output io_iss_uop_prs2_busy, // @[issue-slot.scala:52:14]
output io_iss_uop_prs3_busy, // @[issue-slot.scala:52:14]
output io_iss_uop_ppred_busy, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_stale_pdst, // @[issue-slot.scala:52:14]
output io_iss_uop_exception, // @[issue-slot.scala:52:14]
output [63:0] io_iss_uop_exc_cause, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_mem_cmd, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_mem_size, // @[issue-slot.scala:52:14]
output io_iss_uop_mem_signed, // @[issue-slot.scala:52:14]
output io_iss_uop_uses_ldq, // @[issue-slot.scala:52:14]
output io_iss_uop_uses_stq, // @[issue-slot.scala:52:14]
output io_iss_uop_is_unique, // @[issue-slot.scala:52:14]
output io_iss_uop_flush_on_commit, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_csr_cmd, // @[issue-slot.scala:52:14]
output io_iss_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_ldst, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_lrs1, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_lrs2, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_lrs3, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_dst_rtype, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
output io_iss_uop_frs3_en, // @[issue-slot.scala:52:14]
output io_iss_uop_fcn_dw, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_fcn_op, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_val, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_fp_rm, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_fp_typ, // @[issue-slot.scala:52:14]
output io_iss_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
output io_iss_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
output io_iss_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
output io_iss_uop_bp_debug_if, // @[issue-slot.scala:52:14]
output io_iss_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_debug_fsrc, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_in_uop_valid, // @[issue-slot.scala:52:14]
input [31:0] io_in_uop_bits_inst, // @[issue-slot.scala:52:14]
input [31:0] io_in_uop_bits_debug_inst, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_in_uop_bits_debug_pc, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iq_type_0, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iq_type_1, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iq_type_2, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iq_type_3, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_0, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_1, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_2, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_3, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_4, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_5, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_6, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_7, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_8, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_9, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iw_issued, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_dis_col_sel, // @[issue-slot.scala:52:14]
input [11:0] io_in_uop_bits_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_in_uop_bits_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_in_uop_bits_br_type, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_sfb, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_fence, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_fencei, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_sfence, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_amo, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_eret, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_rocc, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_ftq_idx, // @[issue-slot.scala:52:14]
input io_in_uop_bits_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_pc_lob, // @[issue-slot.scala:52:14]
input io_in_uop_bits_taken, // @[issue-slot.scala:52:14]
input io_in_uop_bits_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_in_uop_bits_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_op2_sel, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_rob_idx, // @[issue-slot.scala:52:14]
input [3:0] io_in_uop_bits_ldq_idx, // @[issue-slot.scala:52:14]
input [3:0] io_in_uop_bits_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_ppred, // @[issue-slot.scala:52:14]
input io_in_uop_bits_prs1_busy, // @[issue-slot.scala:52:14]
input io_in_uop_bits_prs2_busy, // @[issue-slot.scala:52:14]
input io_in_uop_bits_prs3_busy, // @[issue-slot.scala:52:14]
input io_in_uop_bits_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_stale_pdst, // @[issue-slot.scala:52:14]
input io_in_uop_bits_exception, // @[issue-slot.scala:52:14]
input [63:0] io_in_uop_bits_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_mem_size, // @[issue-slot.scala:52:14]
input io_in_uop_bits_mem_signed, // @[issue-slot.scala:52:14]
input io_in_uop_bits_uses_ldq, // @[issue-slot.scala:52:14]
input io_in_uop_bits_uses_stq, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_unique, // @[issue-slot.scala:52:14]
input io_in_uop_bits_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_csr_cmd, // @[issue-slot.scala:52:14]
input io_in_uop_bits_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_in_uop_bits_frs3_en, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_fcn_op, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_fp_typ, // @[issue-slot.scala:52:14]
input io_in_uop_bits_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_in_uop_bits_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_in_uop_bits_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_in_uop_bits_bp_debug_if, // @[issue-slot.scala:52:14]
input io_in_uop_bits_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_debug_tsrc, // @[issue-slot.scala:52:14]
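  // io_out_uop: the slot's current micro-op contents, driven back to the enclosing issue queue.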
output [31:0] io_out_uop_inst, // @[issue-slot.scala:52:14]
output [31:0] io_out_uop_debug_inst, // @[issue-slot.scala:52:14]
output io_out_uop_is_rvc, // @[issue-slot.scala:52:14]
output [39:0] io_out_uop_debug_pc, // @[issue-slot.scala:52:14]
output io_out_uop_iq_type_0, // @[issue-slot.scala:52:14]
output io_out_uop_iq_type_1, // @[issue-slot.scala:52:14]
output io_out_uop_iq_type_2, // @[issue-slot.scala:52:14]
output io_out_uop_iq_type_3, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_0, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_1, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_2, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_3, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_4, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_5, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_6, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_7, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_8, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_9, // @[issue-slot.scala:52:14]
output io_out_uop_iw_issued, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
output io_out_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
output io_out_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
output io_out_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_dis_col_sel, // @[issue-slot.scala:52:14]
output [11:0] io_out_uop_br_mask, // @[issue-slot.scala:52:14]
output [3:0] io_out_uop_br_tag, // @[issue-slot.scala:52:14]
output [3:0] io_out_uop_br_type, // @[issue-slot.scala:52:14]
output io_out_uop_is_sfb, // @[issue-slot.scala:52:14]
output io_out_uop_is_fence, // @[issue-slot.scala:52:14]
output io_out_uop_is_fencei, // @[issue-slot.scala:52:14]
output io_out_uop_is_sfence, // @[issue-slot.scala:52:14]
output io_out_uop_is_amo, // @[issue-slot.scala:52:14]
output io_out_uop_is_eret, // @[issue-slot.scala:52:14]
output io_out_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
output io_out_uop_is_rocc, // @[issue-slot.scala:52:14]
output io_out_uop_is_mov, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_ftq_idx, // @[issue-slot.scala:52:14]
output io_out_uop_edge_inst, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_pc_lob, // @[issue-slot.scala:52:14]
output io_out_uop_taken, // @[issue-slot.scala:52:14]
output io_out_uop_imm_rename, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_imm_sel, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_pimm, // @[issue-slot.scala:52:14]
output [19:0] io_out_uop_imm_packed, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_op1_sel, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_op2_sel, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_rob_idx, // @[issue-slot.scala:52:14]
output [3:0] io_out_uop_ldq_idx, // @[issue-slot.scala:52:14]
output [3:0] io_out_uop_stq_idx, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_rxq_idx, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_pdst, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_prs1, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_prs2, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_prs3, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_ppred, // @[issue-slot.scala:52:14]
output io_out_uop_prs1_busy, // @[issue-slot.scala:52:14]
output io_out_uop_prs2_busy, // @[issue-slot.scala:52:14]
output io_out_uop_prs3_busy, // @[issue-slot.scala:52:14]
output io_out_uop_ppred_busy, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_stale_pdst, // @[issue-slot.scala:52:14]
output io_out_uop_exception, // @[issue-slot.scala:52:14]
output [63:0] io_out_uop_exc_cause, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_mem_cmd, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_mem_size, // @[issue-slot.scala:52:14]
output io_out_uop_mem_signed, // @[issue-slot.scala:52:14]
output io_out_uop_uses_ldq, // @[issue-slot.scala:52:14]
output io_out_uop_uses_stq, // @[issue-slot.scala:52:14]
output io_out_uop_is_unique, // @[issue-slot.scala:52:14]
output io_out_uop_flush_on_commit, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_csr_cmd, // @[issue-slot.scala:52:14]
output io_out_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_ldst, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_lrs1, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_lrs2, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_lrs3, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_dst_rtype, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
output io_out_uop_frs3_en, // @[issue-slot.scala:52:14]
output io_out_uop_fcn_dw, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_fcn_op, // @[issue-slot.scala:52:14]
output io_out_uop_fp_val, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_fp_rm, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_fp_typ, // @[issue-slot.scala:52:14]
output io_out_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
output io_out_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
output io_out_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
output io_out_uop_bp_debug_if, // @[issue-slot.scala:52:14]
output io_out_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_debug_fsrc, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_debug_tsrc, // @[issue-slot.scala:52:14]
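  // io_brupdate: branch-resolution update; b1 carries the resolve/mispredict masks, b2 the resolving branch's uop and redirect information.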
input [11:0] io_brupdate_b1_resolve_mask, // @[issue-slot.scala:52:14]
input [11:0] io_brupdate_b1_mispredict_mask, // @[issue-slot.scala:52:14]
input [31:0] io_brupdate_b2_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_brupdate_b2_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_brupdate_b2_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_issued, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_issued_partial_agen, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_issued_partial_dgen, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [11:0] io_brupdate_b2_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_brupdate_b2_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_brupdate_b2_uop_br_type, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_fence, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_amo, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_eret, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_taken, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_brupdate_b2_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_rob_idx, // @[issue-slot.scala:52:14]
input [3:0] io_brupdate_b2_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [3:0] io_brupdate_b2_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_ppred, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_brupdate_b2_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_mem_size, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_unique, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_brupdate_b2_mispredict, // @[issue-slot.scala:52:14]
input io_brupdate_b2_taken, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_cfi_type, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_pc_sel, // @[issue-slot.scala:52:14]
input [39:0] io_brupdate_b2_jalr_target, // @[issue-slot.scala:52:14]
input [20:0] io_brupdate_b2_target_offset, // @[issue-slot.scala:52:14]
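  // Slot control inputs follow: io_kill, io_clear, io_squash_grant.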
input io_kill, // @[issue-slot.scala:52:14]
input io_clear, // @[issue-slot.scala:52:14]
input io_squash_grant, // @[issue-slot.scala:52:14]
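  // io_wakeup_ports_0..3: wakeup broadcasts (completing uops) observed by the slot. Port 0 additionally
  // carries the bypassable/speculative_mask/rebusy sideband; ports 1-3 carry only the uop payload.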
input io_wakeup_ports_0_valid, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_0_bits_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_0_bits_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_wakeup_ports_0_bits_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_issued, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_issued_partial_agen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_issued_partial_dgen, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [11:0] io_wakeup_ports_0_bits_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_0_bits_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_0_bits_uop_br_type, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_fence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_amo, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_eret, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_taken, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_wakeup_ports_0_bits_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_rob_idx, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_0_bits_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_0_bits_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_ppred, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_wakeup_ports_0_bits_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_mem_size, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_unique, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_bypassable, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_speculative_mask, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_rebusy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_valid, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_1_bits_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_1_bits_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_wakeup_ports_1_bits_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_issued, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_issued_partial_agen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_issued_partial_dgen, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [11:0] io_wakeup_ports_1_bits_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_1_bits_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_1_bits_uop_br_type, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_fence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_amo, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_eret, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_taken, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_wakeup_ports_1_bits_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_rob_idx, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_1_bits_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_1_bits_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_ppred, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_wakeup_ports_1_bits_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_mem_size, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_unique, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_valid, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_2_bits_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_2_bits_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_wakeup_ports_2_bits_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iw_issued, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [11:0] io_wakeup_ports_2_bits_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_2_bits_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_2_bits_uop_br_type, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_fence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_amo, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_eret, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_2_bits_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_taken, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_wakeup_ports_2_bits_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_2_bits_uop_rob_idx, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_2_bits_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_2_bits_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_2_bits_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_2_bits_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_2_bits_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_2_bits_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_ppred, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_2_bits_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_wakeup_ports_2_bits_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_mem_size, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_unique, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_2_bits_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_2_bits_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_2_bits_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_2_bits_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_valid, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_3_bits_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_3_bits_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_wakeup_ports_3_bits_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iw_issued, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [11:0] io_wakeup_ports_3_bits_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_3_bits_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_3_bits_uop_br_type, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_fence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_amo, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_eret, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_3_bits_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_taken, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_wakeup_ports_3_bits_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_3_bits_uop_rob_idx, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_3_bits_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_3_bits_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_3_bits_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_3_bits_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_3_bits_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_3_bits_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_ppred, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_3_bits_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_wakeup_ports_3_bits_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_mem_size, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_unique, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_3_bits_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_3_bits_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_3_bits_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_3_bits_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input [1:0] io_child_rebusys // @[issue-slot.scala:52:14]
);
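  // The generated body first aliases each input port onto an internal `_0` wire before the slot logic uses it.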
wire [11:0] next_uop_out_br_mask; // @[util.scala:104:23]
wire io_grant_0 = io_grant; // @[issue-slot.scala:49:7]
wire io_in_uop_valid_0 = io_in_uop_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_in_uop_bits_inst_0 = io_in_uop_bits_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_in_uop_bits_debug_inst_0 = io_in_uop_bits_debug_inst; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_rvc_0 = io_in_uop_bits_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_in_uop_bits_debug_pc_0 = io_in_uop_bits_debug_pc; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iq_type_0_0 = io_in_uop_bits_iq_type_0; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iq_type_1_0 = io_in_uop_bits_iq_type_1; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iq_type_2_0 = io_in_uop_bits_iq_type_2; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iq_type_3_0 = io_in_uop_bits_iq_type_3; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_0_0 = io_in_uop_bits_fu_code_0; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_1_0 = io_in_uop_bits_fu_code_1; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_2_0 = io_in_uop_bits_fu_code_2; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_3_0 = io_in_uop_bits_fu_code_3; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_4_0 = io_in_uop_bits_fu_code_4; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_5_0 = io_in_uop_bits_fu_code_5; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_6_0 = io_in_uop_bits_fu_code_6; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_7_0 = io_in_uop_bits_fu_code_7; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_8_0 = io_in_uop_bits_fu_code_8; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_9_0 = io_in_uop_bits_fu_code_9; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_issued_0 = io_in_uop_bits_iw_issued; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_iw_p1_speculative_child_0 = io_in_uop_bits_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_iw_p2_speculative_child_0 = io_in_uop_bits_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_p1_bypass_hint_0 = io_in_uop_bits_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_p2_bypass_hint_0 = io_in_uop_bits_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_p3_bypass_hint_0 = io_in_uop_bits_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_dis_col_sel_0 = io_in_uop_bits_dis_col_sel; // @[issue-slot.scala:49:7]
wire [11:0] io_in_uop_bits_br_mask_0 = io_in_uop_bits_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_in_uop_bits_br_tag_0 = io_in_uop_bits_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_in_uop_bits_br_type_0 = io_in_uop_bits_br_type; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_sfb_0 = io_in_uop_bits_is_sfb; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_fence_0 = io_in_uop_bits_is_fence; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_fencei_0 = io_in_uop_bits_is_fencei; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_sfence_0 = io_in_uop_bits_is_sfence; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_amo_0 = io_in_uop_bits_is_amo; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_eret_0 = io_in_uop_bits_is_eret; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_sys_pc2epc_0 = io_in_uop_bits_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_rocc_0 = io_in_uop_bits_is_rocc; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_mov_0 = io_in_uop_bits_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_ftq_idx_0 = io_in_uop_bits_ftq_idx; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_edge_inst_0 = io_in_uop_bits_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_pc_lob_0 = io_in_uop_bits_pc_lob; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_taken_0 = io_in_uop_bits_taken; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_imm_rename_0 = io_in_uop_bits_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_imm_sel_0 = io_in_uop_bits_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_pimm_0 = io_in_uop_bits_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_in_uop_bits_imm_packed_0 = io_in_uop_bits_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_op1_sel_0 = io_in_uop_bits_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_op2_sel_0 = io_in_uop_bits_op2_sel; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_ldst_0 = io_in_uop_bits_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_wen_0 = io_in_uop_bits_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_ren1_0 = io_in_uop_bits_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_ren2_0 = io_in_uop_bits_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_ren3_0 = io_in_uop_bits_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_swap12_0 = io_in_uop_bits_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_swap23_0 = io_in_uop_bits_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_fp_ctrl_typeTagIn_0 = io_in_uop_bits_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_fp_ctrl_typeTagOut_0 = io_in_uop_bits_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_fromint_0 = io_in_uop_bits_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_toint_0 = io_in_uop_bits_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_fastpipe_0 = io_in_uop_bits_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_fma_0 = io_in_uop_bits_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_div_0 = io_in_uop_bits_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_sqrt_0 = io_in_uop_bits_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_wflags_0 = io_in_uop_bits_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_vec_0 = io_in_uop_bits_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_rob_idx_0 = io_in_uop_bits_rob_idx; // @[issue-slot.scala:49:7]
wire [3:0] io_in_uop_bits_ldq_idx_0 = io_in_uop_bits_ldq_idx; // @[issue-slot.scala:49:7]
wire [3:0] io_in_uop_bits_stq_idx_0 = io_in_uop_bits_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_rxq_idx_0 = io_in_uop_bits_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_pdst_0 = io_in_uop_bits_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_prs1_0 = io_in_uop_bits_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_prs2_0 = io_in_uop_bits_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_prs3_0 = io_in_uop_bits_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_ppred_0 = io_in_uop_bits_ppred; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_prs1_busy_0 = io_in_uop_bits_prs1_busy; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_prs2_busy_0 = io_in_uop_bits_prs2_busy; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_prs3_busy_0 = io_in_uop_bits_prs3_busy; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_ppred_busy_0 = io_in_uop_bits_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_stale_pdst_0 = io_in_uop_bits_stale_pdst; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_exception_0 = io_in_uop_bits_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_in_uop_bits_exc_cause_0 = io_in_uop_bits_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_mem_cmd_0 = io_in_uop_bits_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_mem_size_0 = io_in_uop_bits_mem_size; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_mem_signed_0 = io_in_uop_bits_mem_signed; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_uses_ldq_0 = io_in_uop_bits_uses_ldq; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_uses_stq_0 = io_in_uop_bits_uses_stq; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_unique_0 = io_in_uop_bits_is_unique; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_flush_on_commit_0 = io_in_uop_bits_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_csr_cmd_0 = io_in_uop_bits_csr_cmd; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_ldst_is_rs1_0 = io_in_uop_bits_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_ldst_0 = io_in_uop_bits_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_lrs1_0 = io_in_uop_bits_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_lrs2_0 = io_in_uop_bits_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_lrs3_0 = io_in_uop_bits_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_dst_rtype_0 = io_in_uop_bits_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_lrs1_rtype_0 = io_in_uop_bits_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_lrs2_rtype_0 = io_in_uop_bits_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_frs3_en_0 = io_in_uop_bits_frs3_en; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fcn_dw_0 = io_in_uop_bits_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_fcn_op_0 = io_in_uop_bits_fcn_op; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_val_0 = io_in_uop_bits_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_fp_rm_0 = io_in_uop_bits_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_fp_typ_0 = io_in_uop_bits_fp_typ; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_xcpt_pf_if_0 = io_in_uop_bits_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_xcpt_ae_if_0 = io_in_uop_bits_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_xcpt_ma_if_0 = io_in_uop_bits_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_bp_debug_if_0 = io_in_uop_bits_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_bp_xcpt_if_0 = io_in_uop_bits_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_debug_fsrc_0 = io_in_uop_bits_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_debug_tsrc_0 = io_in_uop_bits_debug_tsrc; // @[issue-slot.scala:49:7]
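  // Branch-resolution update (io_brupdate_*): b1 carries the resolve/mispredict masks,
  // b2 carries the resolving branch's uop plus redirect info (taken, cfi_type, pc_sel, targets).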
wire [11:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[issue-slot.scala:49:7]
wire [11:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[issue-slot.scala:49:7]
wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iq_type_0_0 = io_brupdate_b2_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iq_type_1_0 = io_brupdate_b2_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iq_type_2_0 = io_brupdate_b2_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iq_type_3_0 = io_brupdate_b2_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_0_0 = io_brupdate_b2_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_1_0 = io_brupdate_b2_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_2_0 = io_brupdate_b2_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_3_0 = io_brupdate_b2_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_4_0 = io_brupdate_b2_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_5_0 = io_brupdate_b2_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_6_0 = io_brupdate_b2_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_7_0 = io_brupdate_b2_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_8_0 = io_brupdate_b2_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_9_0 = io_brupdate_b2_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_issued_0 = io_brupdate_b2_uop_iw_issued; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_issued_partial_agen_0 = io_brupdate_b2_uop_iw_issued_partial_agen; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_issued_partial_dgen_0 = io_brupdate_b2_uop_iw_issued_partial_dgen; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_iw_p1_speculative_child_0 = io_brupdate_b2_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_iw_p2_speculative_child_0 = io_brupdate_b2_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_p1_bypass_hint_0 = io_brupdate_b2_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_p2_bypass_hint_0 = io_brupdate_b2_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_p3_bypass_hint_0 = io_brupdate_b2_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_dis_col_sel_0 = io_brupdate_b2_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [11:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_brupdate_b2_uop_br_type_0 = io_brupdate_b2_uop_br_type; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_sfence_0 = io_brupdate_b2_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_eret_0 = io_brupdate_b2_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_rocc_0 = io_brupdate_b2_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_mov_0 = io_brupdate_b2_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_imm_rename_0 = io_brupdate_b2_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_imm_sel_0 = io_brupdate_b2_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_pimm_0 = io_brupdate_b2_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_op1_sel_0 = io_brupdate_b2_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_op2_sel_0 = io_brupdate_b2_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_ldst_0 = io_brupdate_b2_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_wen_0 = io_brupdate_b2_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_ren1_0 = io_brupdate_b2_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_ren2_0 = io_brupdate_b2_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_ren3_0 = io_brupdate_b2_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_swap12_0 = io_brupdate_b2_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_swap23_0 = io_brupdate_b2_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagIn_0 = io_brupdate_b2_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagOut_0 = io_brupdate_b2_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_fromint_0 = io_brupdate_b2_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_toint_0 = io_brupdate_b2_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_fastpipe_0 = io_brupdate_b2_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_fma_0 = io_brupdate_b2_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_div_0 = io_brupdate_b2_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_sqrt_0 = io_brupdate_b2_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_wflags_0 = io_brupdate_b2_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_vec_0 = io_brupdate_b2_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [3:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [3:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_csr_cmd_0 = io_brupdate_b2_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fcn_dw_0 = io_brupdate_b2_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_fcn_op_0 = io_brupdate_b2_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_fp_rm_0 = io_brupdate_b2_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_fp_typ_0 = io_brupdate_b2_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[issue-slot.scala:49:7]
wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[issue-slot.scala:49:7]
wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[issue-slot.scala:49:7]
wire io_kill_0 = io_kill; // @[issue-slot.scala:49:7]
wire io_clear_0 = io_clear; // @[issue-slot.scala:49:7]
wire io_squash_grant_0 = io_squash_grant; // @[issue-slot.scala:49:7]
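  // Wakeup port 0 (io_wakeup_ports_0_*): waking uop plus bypassable / speculative_mask / rebusy metadata.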
wire io_wakeup_ports_0_valid_0 = io_wakeup_ports_0_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_0_bits_uop_inst_0 = io_wakeup_ports_0_bits_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_0_bits_uop_debug_inst_0 = io_wakeup_ports_0_bits_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_rvc_0 = io_wakeup_ports_0_bits_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_wakeup_ports_0_bits_uop_debug_pc_0 = io_wakeup_ports_0_bits_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iq_type_0_0 = io_wakeup_ports_0_bits_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iq_type_1_0 = io_wakeup_ports_0_bits_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iq_type_2_0 = io_wakeup_ports_0_bits_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iq_type_3_0 = io_wakeup_ports_0_bits_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_0_0 = io_wakeup_ports_0_bits_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_1_0 = io_wakeup_ports_0_bits_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_2_0 = io_wakeup_ports_0_bits_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_3_0 = io_wakeup_ports_0_bits_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_4_0 = io_wakeup_ports_0_bits_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_5_0 = io_wakeup_ports_0_bits_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_6_0 = io_wakeup_ports_0_bits_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_7_0 = io_wakeup_ports_0_bits_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_8_0 = io_wakeup_ports_0_bits_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_9_0 = io_wakeup_ports_0_bits_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_issued_0 = io_wakeup_ports_0_bits_uop_iw_issued; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_issued_partial_agen_0 = io_wakeup_ports_0_bits_uop_iw_issued_partial_agen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_issued_partial_dgen_0 = io_wakeup_ports_0_bits_uop_iw_issued_partial_dgen; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_iw_p1_speculative_child_0 = io_wakeup_ports_0_bits_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_iw_p2_speculative_child_0 = io_wakeup_ports_0_bits_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_p1_bypass_hint_0 = io_wakeup_ports_0_bits_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_p2_bypass_hint_0 = io_wakeup_ports_0_bits_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_p3_bypass_hint_0 = io_wakeup_ports_0_bits_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_dis_col_sel_0 = io_wakeup_ports_0_bits_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [11:0] io_wakeup_ports_0_bits_uop_br_mask_0 = io_wakeup_ports_0_bits_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_0_bits_uop_br_tag_0 = io_wakeup_ports_0_bits_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_0_bits_uop_br_type_0 = io_wakeup_ports_0_bits_uop_br_type; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_sfb_0 = io_wakeup_ports_0_bits_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_fence_0 = io_wakeup_ports_0_bits_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_fencei_0 = io_wakeup_ports_0_bits_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_sfence_0 = io_wakeup_ports_0_bits_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_amo_0 = io_wakeup_ports_0_bits_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_eret_0 = io_wakeup_ports_0_bits_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_sys_pc2epc_0 = io_wakeup_ports_0_bits_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_rocc_0 = io_wakeup_ports_0_bits_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_mov_0 = io_wakeup_ports_0_bits_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_ftq_idx_0 = io_wakeup_ports_0_bits_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_edge_inst_0 = io_wakeup_ports_0_bits_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_pc_lob_0 = io_wakeup_ports_0_bits_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_taken_0 = io_wakeup_ports_0_bits_uop_taken; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_imm_rename_0 = io_wakeup_ports_0_bits_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_imm_sel_0 = io_wakeup_ports_0_bits_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_pimm_0 = io_wakeup_ports_0_bits_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_wakeup_ports_0_bits_uop_imm_packed_0 = io_wakeup_ports_0_bits_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_op1_sel_0 = io_wakeup_ports_0_bits_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_op2_sel_0 = io_wakeup_ports_0_bits_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_ldst_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_wen_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_ren1_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_ren2_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_ren3_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_swap12_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_swap23_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagIn_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagOut_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_fromint_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_toint_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_fastpipe_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_fma_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_div_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_sqrt_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_wflags_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_vec_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_rob_idx_0 = io_wakeup_ports_0_bits_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_0_bits_uop_ldq_idx_0 = io_wakeup_ports_0_bits_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_0_bits_uop_stq_idx_0 = io_wakeup_ports_0_bits_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_rxq_idx_0 = io_wakeup_ports_0_bits_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_pdst_0 = io_wakeup_ports_0_bits_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_prs1_0 = io_wakeup_ports_0_bits_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_prs2_0 = io_wakeup_ports_0_bits_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_prs3_0 = io_wakeup_ports_0_bits_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_ppred_0 = io_wakeup_ports_0_bits_uop_ppred; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_prs1_busy_0 = io_wakeup_ports_0_bits_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_prs2_busy_0 = io_wakeup_ports_0_bits_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_prs3_busy_0 = io_wakeup_ports_0_bits_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_ppred_busy_0 = io_wakeup_ports_0_bits_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_stale_pdst_0 = io_wakeup_ports_0_bits_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_exception_0 = io_wakeup_ports_0_bits_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_wakeup_ports_0_bits_uop_exc_cause_0 = io_wakeup_ports_0_bits_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_mem_cmd_0 = io_wakeup_ports_0_bits_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_mem_size_0 = io_wakeup_ports_0_bits_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_mem_signed_0 = io_wakeup_ports_0_bits_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_uses_ldq_0 = io_wakeup_ports_0_bits_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_uses_stq_0 = io_wakeup_ports_0_bits_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_unique_0 = io_wakeup_ports_0_bits_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_flush_on_commit_0 = io_wakeup_ports_0_bits_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_csr_cmd_0 = io_wakeup_ports_0_bits_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_ldst_is_rs1_0 = io_wakeup_ports_0_bits_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_ldst_0 = io_wakeup_ports_0_bits_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_lrs1_0 = io_wakeup_ports_0_bits_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_lrs2_0 = io_wakeup_ports_0_bits_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_lrs3_0 = io_wakeup_ports_0_bits_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_dst_rtype_0 = io_wakeup_ports_0_bits_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_lrs1_rtype_0 = io_wakeup_ports_0_bits_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_lrs2_rtype_0 = io_wakeup_ports_0_bits_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_frs3_en_0 = io_wakeup_ports_0_bits_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fcn_dw_0 = io_wakeup_ports_0_bits_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_fcn_op_0 = io_wakeup_ports_0_bits_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_val_0 = io_wakeup_ports_0_bits_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_fp_rm_0 = io_wakeup_ports_0_bits_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_fp_typ_0 = io_wakeup_ports_0_bits_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_xcpt_pf_if_0 = io_wakeup_ports_0_bits_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_xcpt_ae_if_0 = io_wakeup_ports_0_bits_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_xcpt_ma_if_0 = io_wakeup_ports_0_bits_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_bp_debug_if_0 = io_wakeup_ports_0_bits_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_bp_xcpt_if_0 = io_wakeup_ports_0_bits_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_debug_fsrc_0 = io_wakeup_ports_0_bits_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_debug_tsrc_0 = io_wakeup_ports_0_bits_uop_debug_tsrc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_bypassable_0 = io_wakeup_ports_0_bits_bypassable; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_speculative_mask_0 = io_wakeup_ports_0_bits_speculative_mask; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_rebusy_0 = io_wakeup_ports_0_bits_rebusy; // @[issue-slot.scala:49:7]
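  // Wakeup port 1 (io_wakeup_ports_1_*):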
wire io_wakeup_ports_1_valid_0 = io_wakeup_ports_1_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_1_bits_uop_inst_0 = io_wakeup_ports_1_bits_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_1_bits_uop_debug_inst_0 = io_wakeup_ports_1_bits_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_rvc_0 = io_wakeup_ports_1_bits_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_wakeup_ports_1_bits_uop_debug_pc_0 = io_wakeup_ports_1_bits_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iq_type_0_0 = io_wakeup_ports_1_bits_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iq_type_1_0 = io_wakeup_ports_1_bits_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iq_type_2_0 = io_wakeup_ports_1_bits_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iq_type_3_0 = io_wakeup_ports_1_bits_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_0_0 = io_wakeup_ports_1_bits_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_1_0 = io_wakeup_ports_1_bits_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_2_0 = io_wakeup_ports_1_bits_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_3_0 = io_wakeup_ports_1_bits_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_4_0 = io_wakeup_ports_1_bits_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_5_0 = io_wakeup_ports_1_bits_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_6_0 = io_wakeup_ports_1_bits_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_7_0 = io_wakeup_ports_1_bits_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_8_0 = io_wakeup_ports_1_bits_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_9_0 = io_wakeup_ports_1_bits_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_issued_0 = io_wakeup_ports_1_bits_uop_iw_issued; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_issued_partial_agen_0 = io_wakeup_ports_1_bits_uop_iw_issued_partial_agen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_issued_partial_dgen_0 = io_wakeup_ports_1_bits_uop_iw_issued_partial_dgen; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_iw_p1_speculative_child_0 = io_wakeup_ports_1_bits_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_iw_p2_speculative_child_0 = io_wakeup_ports_1_bits_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_p1_bypass_hint_0 = io_wakeup_ports_1_bits_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_p2_bypass_hint_0 = io_wakeup_ports_1_bits_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_p3_bypass_hint_0 = io_wakeup_ports_1_bits_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_dis_col_sel_0 = io_wakeup_ports_1_bits_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [11:0] io_wakeup_ports_1_bits_uop_br_mask_0 = io_wakeup_ports_1_bits_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_1_bits_uop_br_tag_0 = io_wakeup_ports_1_bits_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_1_bits_uop_br_type_0 = io_wakeup_ports_1_bits_uop_br_type; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_sfb_0 = io_wakeup_ports_1_bits_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_fence_0 = io_wakeup_ports_1_bits_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_fencei_0 = io_wakeup_ports_1_bits_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_sfence_0 = io_wakeup_ports_1_bits_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_amo_0 = io_wakeup_ports_1_bits_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_eret_0 = io_wakeup_ports_1_bits_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_sys_pc2epc_0 = io_wakeup_ports_1_bits_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_rocc_0 = io_wakeup_ports_1_bits_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_mov_0 = io_wakeup_ports_1_bits_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_ftq_idx_0 = io_wakeup_ports_1_bits_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_edge_inst_0 = io_wakeup_ports_1_bits_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_pc_lob_0 = io_wakeup_ports_1_bits_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_taken_0 = io_wakeup_ports_1_bits_uop_taken; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_imm_rename_0 = io_wakeup_ports_1_bits_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_imm_sel_0 = io_wakeup_ports_1_bits_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_pimm_0 = io_wakeup_ports_1_bits_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_wakeup_ports_1_bits_uop_imm_packed_0 = io_wakeup_ports_1_bits_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_op1_sel_0 = io_wakeup_ports_1_bits_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_op2_sel_0 = io_wakeup_ports_1_bits_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_ldst_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_wen_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_ren1_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_ren2_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_ren3_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_swap12_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_swap23_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagIn_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagOut_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_fromint_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_toint_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_fastpipe_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_fma_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_div_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_sqrt_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_wflags_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_vec_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_rob_idx_0 = io_wakeup_ports_1_bits_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_1_bits_uop_ldq_idx_0 = io_wakeup_ports_1_bits_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_1_bits_uop_stq_idx_0 = io_wakeup_ports_1_bits_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_rxq_idx_0 = io_wakeup_ports_1_bits_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_pdst_0 = io_wakeup_ports_1_bits_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_prs1_0 = io_wakeup_ports_1_bits_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_prs2_0 = io_wakeup_ports_1_bits_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_prs3_0 = io_wakeup_ports_1_bits_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_ppred_0 = io_wakeup_ports_1_bits_uop_ppred; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_prs1_busy_0 = io_wakeup_ports_1_bits_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_prs2_busy_0 = io_wakeup_ports_1_bits_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_prs3_busy_0 = io_wakeup_ports_1_bits_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_ppred_busy_0 = io_wakeup_ports_1_bits_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_stale_pdst_0 = io_wakeup_ports_1_bits_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_exception_0 = io_wakeup_ports_1_bits_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_wakeup_ports_1_bits_uop_exc_cause_0 = io_wakeup_ports_1_bits_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_mem_cmd_0 = io_wakeup_ports_1_bits_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_mem_size_0 = io_wakeup_ports_1_bits_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_mem_signed_0 = io_wakeup_ports_1_bits_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_uses_ldq_0 = io_wakeup_ports_1_bits_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_uses_stq_0 = io_wakeup_ports_1_bits_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_unique_0 = io_wakeup_ports_1_bits_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_flush_on_commit_0 = io_wakeup_ports_1_bits_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_csr_cmd_0 = io_wakeup_ports_1_bits_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_ldst_is_rs1_0 = io_wakeup_ports_1_bits_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_ldst_0 = io_wakeup_ports_1_bits_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_lrs1_0 = io_wakeup_ports_1_bits_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_lrs2_0 = io_wakeup_ports_1_bits_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_lrs3_0 = io_wakeup_ports_1_bits_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_dst_rtype_0 = io_wakeup_ports_1_bits_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_lrs1_rtype_0 = io_wakeup_ports_1_bits_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_lrs2_rtype_0 = io_wakeup_ports_1_bits_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_frs3_en_0 = io_wakeup_ports_1_bits_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fcn_dw_0 = io_wakeup_ports_1_bits_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_fcn_op_0 = io_wakeup_ports_1_bits_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_val_0 = io_wakeup_ports_1_bits_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_fp_rm_0 = io_wakeup_ports_1_bits_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_fp_typ_0 = io_wakeup_ports_1_bits_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_xcpt_pf_if_0 = io_wakeup_ports_1_bits_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_xcpt_ae_if_0 = io_wakeup_ports_1_bits_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_xcpt_ma_if_0 = io_wakeup_ports_1_bits_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_bp_debug_if_0 = io_wakeup_ports_1_bits_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_bp_xcpt_if_0 = io_wakeup_ports_1_bits_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_debug_fsrc_0 = io_wakeup_ports_1_bits_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_debug_tsrc_0 = io_wakeup_ports_1_bits_uop_debug_tsrc; // @[issue-slot.scala:49:7]
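  // Wakeup port 2 (io_wakeup_ports_2_*):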
wire io_wakeup_ports_2_valid_0 = io_wakeup_ports_2_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_2_bits_uop_inst_0 = io_wakeup_ports_2_bits_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_2_bits_uop_debug_inst_0 = io_wakeup_ports_2_bits_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_rvc_0 = io_wakeup_ports_2_bits_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_wakeup_ports_2_bits_uop_debug_pc_0 = io_wakeup_ports_2_bits_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iq_type_0_0 = io_wakeup_ports_2_bits_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iq_type_1_0 = io_wakeup_ports_2_bits_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iq_type_2_0 = io_wakeup_ports_2_bits_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iq_type_3_0 = io_wakeup_ports_2_bits_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_0_0 = io_wakeup_ports_2_bits_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_1_0 = io_wakeup_ports_2_bits_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_2_0 = io_wakeup_ports_2_bits_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_3_0 = io_wakeup_ports_2_bits_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_4_0 = io_wakeup_ports_2_bits_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_5_0 = io_wakeup_ports_2_bits_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_6_0 = io_wakeup_ports_2_bits_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_7_0 = io_wakeup_ports_2_bits_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_8_0 = io_wakeup_ports_2_bits_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_9_0 = io_wakeup_ports_2_bits_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_issued_0 = io_wakeup_ports_2_bits_uop_iw_issued; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_iw_p1_speculative_child_0 = io_wakeup_ports_2_bits_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_iw_p2_speculative_child_0 = io_wakeup_ports_2_bits_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_p1_bypass_hint_0 = io_wakeup_ports_2_bits_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_p2_bypass_hint_0 = io_wakeup_ports_2_bits_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_p3_bypass_hint_0 = io_wakeup_ports_2_bits_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_dis_col_sel_0 = io_wakeup_ports_2_bits_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [11:0] io_wakeup_ports_2_bits_uop_br_mask_0 = io_wakeup_ports_2_bits_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_2_bits_uop_br_tag_0 = io_wakeup_ports_2_bits_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_2_bits_uop_br_type_0 = io_wakeup_ports_2_bits_uop_br_type; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_sfb_0 = io_wakeup_ports_2_bits_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_fence_0 = io_wakeup_ports_2_bits_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_fencei_0 = io_wakeup_ports_2_bits_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_sfence_0 = io_wakeup_ports_2_bits_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_amo_0 = io_wakeup_ports_2_bits_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_eret_0 = io_wakeup_ports_2_bits_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_sys_pc2epc_0 = io_wakeup_ports_2_bits_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_rocc_0 = io_wakeup_ports_2_bits_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_mov_0 = io_wakeup_ports_2_bits_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_ftq_idx_0 = io_wakeup_ports_2_bits_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_edge_inst_0 = io_wakeup_ports_2_bits_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_2_bits_uop_pc_lob_0 = io_wakeup_ports_2_bits_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_taken_0 = io_wakeup_ports_2_bits_uop_taken; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_imm_rename_0 = io_wakeup_ports_2_bits_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_imm_sel_0 = io_wakeup_ports_2_bits_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_pimm_0 = io_wakeup_ports_2_bits_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_wakeup_ports_2_bits_uop_imm_packed_0 = io_wakeup_ports_2_bits_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_op1_sel_0 = io_wakeup_ports_2_bits_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_op2_sel_0 = io_wakeup_ports_2_bits_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_ldst_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_wen_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_ren1_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_ren2_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_ren3_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_swap12_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_swap23_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagIn_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagOut_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_fromint_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_toint_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_fastpipe_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_fma_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_div_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_sqrt_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_wflags_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_vec_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_2_bits_uop_rob_idx_0 = io_wakeup_ports_2_bits_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_2_bits_uop_ldq_idx_0 = io_wakeup_ports_2_bits_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_2_bits_uop_stq_idx_0 = io_wakeup_ports_2_bits_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_rxq_idx_0 = io_wakeup_ports_2_bits_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_2_bits_uop_pdst_0 = io_wakeup_ports_2_bits_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_2_bits_uop_prs1_0 = io_wakeup_ports_2_bits_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_2_bits_uop_prs2_0 = io_wakeup_ports_2_bits_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_2_bits_uop_prs3_0 = io_wakeup_ports_2_bits_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_ppred_0 = io_wakeup_ports_2_bits_uop_ppred; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_prs1_busy_0 = io_wakeup_ports_2_bits_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_prs2_busy_0 = io_wakeup_ports_2_bits_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_prs3_busy_0 = io_wakeup_ports_2_bits_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_ppred_busy_0 = io_wakeup_ports_2_bits_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_2_bits_uop_stale_pdst_0 = io_wakeup_ports_2_bits_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_exception_0 = io_wakeup_ports_2_bits_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_wakeup_ports_2_bits_uop_exc_cause_0 = io_wakeup_ports_2_bits_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_mem_cmd_0 = io_wakeup_ports_2_bits_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_mem_size_0 = io_wakeup_ports_2_bits_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_mem_signed_0 = io_wakeup_ports_2_bits_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_uses_ldq_0 = io_wakeup_ports_2_bits_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_uses_stq_0 = io_wakeup_ports_2_bits_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_unique_0 = io_wakeup_ports_2_bits_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_flush_on_commit_0 = io_wakeup_ports_2_bits_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_csr_cmd_0 = io_wakeup_ports_2_bits_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_ldst_is_rs1_0 = io_wakeup_ports_2_bits_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_2_bits_uop_ldst_0 = io_wakeup_ports_2_bits_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_2_bits_uop_lrs1_0 = io_wakeup_ports_2_bits_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_2_bits_uop_lrs2_0 = io_wakeup_ports_2_bits_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_2_bits_uop_lrs3_0 = io_wakeup_ports_2_bits_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_dst_rtype_0 = io_wakeup_ports_2_bits_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_lrs1_rtype_0 = io_wakeup_ports_2_bits_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_lrs2_rtype_0 = io_wakeup_ports_2_bits_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_frs3_en_0 = io_wakeup_ports_2_bits_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fcn_dw_0 = io_wakeup_ports_2_bits_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_fcn_op_0 = io_wakeup_ports_2_bits_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_val_0 = io_wakeup_ports_2_bits_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_fp_rm_0 = io_wakeup_ports_2_bits_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_fp_typ_0 = io_wakeup_ports_2_bits_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_xcpt_pf_if_0 = io_wakeup_ports_2_bits_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_xcpt_ae_if_0 = io_wakeup_ports_2_bits_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_xcpt_ma_if_0 = io_wakeup_ports_2_bits_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_bp_debug_if_0 = io_wakeup_ports_2_bits_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_bp_xcpt_if_0 = io_wakeup_ports_2_bits_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_debug_fsrc_0 = io_wakeup_ports_2_bits_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_debug_tsrc_0 = io_wakeup_ports_2_bits_uop_debug_tsrc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_valid_0 = io_wakeup_ports_3_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_3_bits_uop_inst_0 = io_wakeup_ports_3_bits_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_3_bits_uop_debug_inst_0 = io_wakeup_ports_3_bits_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_rvc_0 = io_wakeup_ports_3_bits_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_wakeup_ports_3_bits_uop_debug_pc_0 = io_wakeup_ports_3_bits_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iq_type_0_0 = io_wakeup_ports_3_bits_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iq_type_1_0 = io_wakeup_ports_3_bits_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iq_type_2_0 = io_wakeup_ports_3_bits_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iq_type_3_0 = io_wakeup_ports_3_bits_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_0_0 = io_wakeup_ports_3_bits_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_1_0 = io_wakeup_ports_3_bits_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_2_0 = io_wakeup_ports_3_bits_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_3_0 = io_wakeup_ports_3_bits_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_4_0 = io_wakeup_ports_3_bits_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_5_0 = io_wakeup_ports_3_bits_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_6_0 = io_wakeup_ports_3_bits_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_7_0 = io_wakeup_ports_3_bits_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_8_0 = io_wakeup_ports_3_bits_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_9_0 = io_wakeup_ports_3_bits_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_issued_0 = io_wakeup_ports_3_bits_uop_iw_issued; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_iw_p1_speculative_child_0 = io_wakeup_ports_3_bits_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_iw_p2_speculative_child_0 = io_wakeup_ports_3_bits_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_p1_bypass_hint_0 = io_wakeup_ports_3_bits_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_p2_bypass_hint_0 = io_wakeup_ports_3_bits_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_p3_bypass_hint_0 = io_wakeup_ports_3_bits_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_dis_col_sel_0 = io_wakeup_ports_3_bits_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [11:0] io_wakeup_ports_3_bits_uop_br_mask_0 = io_wakeup_ports_3_bits_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_3_bits_uop_br_tag_0 = io_wakeup_ports_3_bits_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_3_bits_uop_br_type_0 = io_wakeup_ports_3_bits_uop_br_type; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_sfb_0 = io_wakeup_ports_3_bits_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_fence_0 = io_wakeup_ports_3_bits_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_fencei_0 = io_wakeup_ports_3_bits_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_sfence_0 = io_wakeup_ports_3_bits_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_amo_0 = io_wakeup_ports_3_bits_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_eret_0 = io_wakeup_ports_3_bits_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_sys_pc2epc_0 = io_wakeup_ports_3_bits_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_rocc_0 = io_wakeup_ports_3_bits_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_mov_0 = io_wakeup_ports_3_bits_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_ftq_idx_0 = io_wakeup_ports_3_bits_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_edge_inst_0 = io_wakeup_ports_3_bits_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_3_bits_uop_pc_lob_0 = io_wakeup_ports_3_bits_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_taken_0 = io_wakeup_ports_3_bits_uop_taken; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_imm_rename_0 = io_wakeup_ports_3_bits_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_imm_sel_0 = io_wakeup_ports_3_bits_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_pimm_0 = io_wakeup_ports_3_bits_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_wakeup_ports_3_bits_uop_imm_packed_0 = io_wakeup_ports_3_bits_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_op1_sel_0 = io_wakeup_ports_3_bits_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_op2_sel_0 = io_wakeup_ports_3_bits_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_ldst_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_wen_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_ren1_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_ren2_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_ren3_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_swap12_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_swap23_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagIn_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagOut_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_fromint_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_toint_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_fastpipe_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_fma_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_div_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_sqrt_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_wflags_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_vec_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_3_bits_uop_rob_idx_0 = io_wakeup_ports_3_bits_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_3_bits_uop_ldq_idx_0 = io_wakeup_ports_3_bits_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_3_bits_uop_stq_idx_0 = io_wakeup_ports_3_bits_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_rxq_idx_0 = io_wakeup_ports_3_bits_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_3_bits_uop_pdst_0 = io_wakeup_ports_3_bits_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_3_bits_uop_prs1_0 = io_wakeup_ports_3_bits_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_3_bits_uop_prs2_0 = io_wakeup_ports_3_bits_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_3_bits_uop_prs3_0 = io_wakeup_ports_3_bits_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_ppred_0 = io_wakeup_ports_3_bits_uop_ppred; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_prs1_busy_0 = io_wakeup_ports_3_bits_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_prs2_busy_0 = io_wakeup_ports_3_bits_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_prs3_busy_0 = io_wakeup_ports_3_bits_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_ppred_busy_0 = io_wakeup_ports_3_bits_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_3_bits_uop_stale_pdst_0 = io_wakeup_ports_3_bits_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_exception_0 = io_wakeup_ports_3_bits_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_wakeup_ports_3_bits_uop_exc_cause_0 = io_wakeup_ports_3_bits_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_mem_cmd_0 = io_wakeup_ports_3_bits_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_mem_size_0 = io_wakeup_ports_3_bits_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_mem_signed_0 = io_wakeup_ports_3_bits_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_uses_ldq_0 = io_wakeup_ports_3_bits_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_uses_stq_0 = io_wakeup_ports_3_bits_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_unique_0 = io_wakeup_ports_3_bits_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_flush_on_commit_0 = io_wakeup_ports_3_bits_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_csr_cmd_0 = io_wakeup_ports_3_bits_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_ldst_is_rs1_0 = io_wakeup_ports_3_bits_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_3_bits_uop_ldst_0 = io_wakeup_ports_3_bits_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_3_bits_uop_lrs1_0 = io_wakeup_ports_3_bits_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_3_bits_uop_lrs2_0 = io_wakeup_ports_3_bits_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_3_bits_uop_lrs3_0 = io_wakeup_ports_3_bits_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_dst_rtype_0 = io_wakeup_ports_3_bits_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_lrs1_rtype_0 = io_wakeup_ports_3_bits_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_lrs2_rtype_0 = io_wakeup_ports_3_bits_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_frs3_en_0 = io_wakeup_ports_3_bits_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fcn_dw_0 = io_wakeup_ports_3_bits_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_fcn_op_0 = io_wakeup_ports_3_bits_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_val_0 = io_wakeup_ports_3_bits_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_fp_rm_0 = io_wakeup_ports_3_bits_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_fp_typ_0 = io_wakeup_ports_3_bits_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_xcpt_pf_if_0 = io_wakeup_ports_3_bits_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_xcpt_ae_if_0 = io_wakeup_ports_3_bits_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_xcpt_ma_if_0 = io_wakeup_ports_3_bits_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_bp_debug_if_0 = io_wakeup_ports_3_bits_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_bp_xcpt_if_0 = io_wakeup_ports_3_bits_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_debug_fsrc_0 = io_wakeup_ports_3_bits_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_debug_tsrc_0 = io_wakeup_ports_3_bits_uop_debug_tsrc; // @[issue-slot.scala:49:7]
wire [1:0] io_child_rebusys_0 = io_child_rebusys; // @[issue-slot.scala:49:7]
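// Note: the wires below are tied off to constants for this elaborated configuration
// (partial agen/dgen issue, selected rebusy/bypassable hints, and the predicate wakeup
// port); they are presumably fixed by the slot's parameters rather than driven by logic.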
wire io_iss_uop_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_bypassable = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_rebusy = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_rebusy = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_rebusy = 1'h0; // @[issue-slot.scala:49:7]
wire io_pred_wakeup_port_valid = 1'h0; // @[issue-slot.scala:49:7]
wire next_uop_out_iw_issued_partial_agen = 1'h0; // @[util.scala:104:23]
wire next_uop_out_iw_issued_partial_dgen = 1'h0; // @[util.scala:104:23]
wire next_uop_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:59:28]
wire next_uop_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:59:28]
wire prs1_rebusys_1 = 1'h0; // @[issue-slot.scala:102:91]
wire prs1_rebusys_2 = 1'h0; // @[issue-slot.scala:102:91]
wire prs1_rebusys_3 = 1'h0; // @[issue-slot.scala:102:91]
wire prs2_rebusys_1 = 1'h0; // @[issue-slot.scala:103:91]
wire prs2_rebusys_2 = 1'h0; // @[issue-slot.scala:103:91]
wire prs2_rebusys_3 = 1'h0; // @[issue-slot.scala:103:91]
wire _next_uop_iw_p1_bypass_hint_T_1 = 1'h0; // @[Mux.scala:30:73]
wire _next_uop_iw_p2_bypass_hint_T_1 = 1'h0; // @[Mux.scala:30:73]
wire _next_uop_iw_p3_bypass_hint_T_1 = 1'h0; // @[Mux.scala:30:73]
wire _iss_ready_T_6 = 1'h0; // @[issue-slot.scala:136:131]
wire agen_ready = 1'h0; // @[issue-slot.scala:137:114]
wire dgen_ready = 1'h0; // @[issue-slot.scala:138:114]
wire [1:0] io_wakeup_ports_1_bits_speculative_mask = 2'h0; // @[issue-slot.scala:49:7]
wire [1:0] _next_uop_iw_p1_speculative_child_T_1 = 2'h0; // @[Mux.scala:30:73]
wire [1:0] _next_uop_iw_p2_speculative_child_T_1 = 2'h0; // @[Mux.scala:30:73]
wire io_wakeup_ports_2_bits_bypassable = 1'h1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_bypassable = 1'h1; // @[issue-slot.scala:49:7]
wire _iss_ready_T_7 = 1'h1; // @[issue-slot.scala:136:110]
wire [1:0] io_wakeup_ports_2_bits_speculative_mask = 2'h1; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_speculative_mask = 2'h2; // @[issue-slot.scala:49:7]
wire [4:0] io_pred_wakeup_port_bits = 5'h0; // @[issue-slot.scala:49:7]
wire _io_will_be_valid_T_1; // @[issue-slot.scala:65:34]
wire _io_request_T_4; // @[issue-slot.scala:140:51]
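// next_uop_* wires: forward declarations of the combinational next-state micro-op held
// by this slot (issue-slot.scala:59); they are driven later in the module.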
wire [31:0] next_uop_inst; // @[issue-slot.scala:59:28]
wire [31:0] next_uop_debug_inst; // @[issue-slot.scala:59:28]
wire next_uop_is_rvc; // @[issue-slot.scala:59:28]
wire [39:0] next_uop_debug_pc; // @[issue-slot.scala:59:28]
wire next_uop_iq_type_0; // @[issue-slot.scala:59:28]
wire next_uop_iq_type_1; // @[issue-slot.scala:59:28]
wire next_uop_iq_type_2; // @[issue-slot.scala:59:28]
wire next_uop_iq_type_3; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_0; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_1; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_2; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_3; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_4; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_5; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_6; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_7; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_8; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_9; // @[issue-slot.scala:59:28]
wire next_uop_iw_issued; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_iw_p1_speculative_child; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_iw_p2_speculative_child; // @[issue-slot.scala:59:28]
wire next_uop_iw_p1_bypass_hint; // @[issue-slot.scala:59:28]
wire next_uop_iw_p2_bypass_hint; // @[issue-slot.scala:59:28]
wire next_uop_iw_p3_bypass_hint; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_dis_col_sel; // @[issue-slot.scala:59:28]
wire [11:0] next_uop_br_mask; // @[issue-slot.scala:59:28]
wire [3:0] next_uop_br_tag; // @[issue-slot.scala:59:28]
wire [3:0] next_uop_br_type; // @[issue-slot.scala:59:28]
wire next_uop_is_sfb; // @[issue-slot.scala:59:28]
wire next_uop_is_fence; // @[issue-slot.scala:59:28]
wire next_uop_is_fencei; // @[issue-slot.scala:59:28]
wire next_uop_is_sfence; // @[issue-slot.scala:59:28]
wire next_uop_is_amo; // @[issue-slot.scala:59:28]
wire next_uop_is_eret; // @[issue-slot.scala:59:28]
wire next_uop_is_sys_pc2epc; // @[issue-slot.scala:59:28]
wire next_uop_is_rocc; // @[issue-slot.scala:59:28]
wire next_uop_is_mov; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_ftq_idx; // @[issue-slot.scala:59:28]
wire next_uop_edge_inst; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_pc_lob; // @[issue-slot.scala:59:28]
wire next_uop_taken; // @[issue-slot.scala:59:28]
wire next_uop_imm_rename; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_imm_sel; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_pimm; // @[issue-slot.scala:59:28]
wire [19:0] next_uop_imm_packed; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_op1_sel; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_op2_sel; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_ldst; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_wen; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_ren1; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_ren2; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_ren3; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_swap12; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_swap23; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_fromint; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_toint; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_fma; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_div; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_sqrt; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_wflags; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_vec; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_rob_idx; // @[issue-slot.scala:59:28]
wire [3:0] next_uop_ldq_idx; // @[issue-slot.scala:59:28]
wire [3:0] next_uop_stq_idx; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_rxq_idx; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_pdst; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_prs1; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_prs2; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_prs3; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_ppred; // @[issue-slot.scala:59:28]
wire next_uop_prs1_busy; // @[issue-slot.scala:59:28]
wire next_uop_prs2_busy; // @[issue-slot.scala:59:28]
wire next_uop_prs3_busy; // @[issue-slot.scala:59:28]
wire next_uop_ppred_busy; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_stale_pdst; // @[issue-slot.scala:59:28]
wire next_uop_exception; // @[issue-slot.scala:59:28]
wire [63:0] next_uop_exc_cause; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_mem_cmd; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_mem_size; // @[issue-slot.scala:59:28]
wire next_uop_mem_signed; // @[issue-slot.scala:59:28]
wire next_uop_uses_ldq; // @[issue-slot.scala:59:28]
wire next_uop_uses_stq; // @[issue-slot.scala:59:28]
wire next_uop_is_unique; // @[issue-slot.scala:59:28]
wire next_uop_flush_on_commit; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_csr_cmd; // @[issue-slot.scala:59:28]
wire next_uop_ldst_is_rs1; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_ldst; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_lrs1; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_lrs2; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_lrs3; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_dst_rtype; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_lrs1_rtype; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_lrs2_rtype; // @[issue-slot.scala:59:28]
wire next_uop_frs3_en; // @[issue-slot.scala:59:28]
wire next_uop_fcn_dw; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_fcn_op; // @[issue-slot.scala:59:28]
wire next_uop_fp_val; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_fp_rm; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_fp_typ; // @[issue-slot.scala:59:28]
wire next_uop_xcpt_pf_if; // @[issue-slot.scala:59:28]
wire next_uop_xcpt_ae_if; // @[issue-slot.scala:59:28]
wire next_uop_xcpt_ma_if; // @[issue-slot.scala:59:28]
wire next_uop_bp_debug_if; // @[issue-slot.scala:59:28]
wire next_uop_bp_xcpt_if; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_debug_fsrc; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_debug_tsrc; // @[issue-slot.scala:59:28]
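// Forward declarations of the io_iss_uop_* and io_out_uop_* output fields; io_iss_uop_*
// is assigned from the slot registers below, while io_out_uop_* is presumably driven from
// the next_uop logic later in the module.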
wire io_iss_uop_iq_type_0_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iq_type_1_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iq_type_2_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iq_type_3_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_0_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_1_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_2_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_3_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_4_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_5_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_6_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_7_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_8_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_9_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_ldst_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_wen_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_ren1_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_ren2_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_ren3_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_swap12_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_swap23_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_fp_ctrl_typeTagIn_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_fp_ctrl_typeTagOut_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_fromint_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_toint_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_fastpipe_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_fma_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_div_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_sqrt_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_wflags_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_vec_0; // @[issue-slot.scala:49:7]
wire [31:0] io_iss_uop_inst_0; // @[issue-slot.scala:49:7]
wire [31:0] io_iss_uop_debug_inst_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_rvc_0; // @[issue-slot.scala:49:7]
wire [39:0] io_iss_uop_debug_pc_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_issued_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_iw_p1_speculative_child_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_iw_p2_speculative_child_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_p1_bypass_hint_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_p2_bypass_hint_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_p3_bypass_hint_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_dis_col_sel_0; // @[issue-slot.scala:49:7]
wire [11:0] io_iss_uop_br_mask_0; // @[issue-slot.scala:49:7]
wire [3:0] io_iss_uop_br_tag_0; // @[issue-slot.scala:49:7]
wire [3:0] io_iss_uop_br_type_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_sfb_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_fence_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_fencei_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_sfence_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_amo_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_eret_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_sys_pc2epc_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_rocc_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_mov_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_ftq_idx_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_edge_inst_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_pc_lob_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_taken_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_imm_rename_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_imm_sel_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_pimm_0; // @[issue-slot.scala:49:7]
wire [19:0] io_iss_uop_imm_packed_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_op1_sel_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_op2_sel_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_rob_idx_0; // @[issue-slot.scala:49:7]
wire [3:0] io_iss_uop_ldq_idx_0; // @[issue-slot.scala:49:7]
wire [3:0] io_iss_uop_stq_idx_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_rxq_idx_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_pdst_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_prs1_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_prs2_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_prs3_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_ppred_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_prs1_busy_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_prs2_busy_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_prs3_busy_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_ppred_busy_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_stale_pdst_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_exception_0; // @[issue-slot.scala:49:7]
wire [63:0] io_iss_uop_exc_cause_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_mem_cmd_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_mem_size_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_mem_signed_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_uses_ldq_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_uses_stq_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_unique_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_flush_on_commit_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_csr_cmd_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_ldst_is_rs1_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_ldst_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_lrs1_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_lrs2_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_lrs3_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_dst_rtype_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_lrs1_rtype_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_lrs2_rtype_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_frs3_en_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fcn_dw_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_fcn_op_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_val_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_fp_rm_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_fp_typ_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_xcpt_pf_if_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_xcpt_ae_if_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_xcpt_ma_if_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_bp_debug_if_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_bp_xcpt_if_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_debug_fsrc_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_debug_tsrc_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iq_type_0_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iq_type_1_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iq_type_2_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iq_type_3_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_0_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_1_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_2_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_3_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_4_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_5_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_6_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_7_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_8_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_9_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_ldst_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_wen_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_ren1_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_ren2_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_ren3_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_swap12_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_swap23_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_fp_ctrl_typeTagIn_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_fp_ctrl_typeTagOut_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_fromint_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_toint_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_fastpipe_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_fma_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_div_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_sqrt_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_wflags_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_vec_0; // @[issue-slot.scala:49:7]
wire [31:0] io_out_uop_inst_0; // @[issue-slot.scala:49:7]
wire [31:0] io_out_uop_debug_inst_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_rvc_0; // @[issue-slot.scala:49:7]
wire [39:0] io_out_uop_debug_pc_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_issued_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_iw_p1_speculative_child_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_iw_p2_speculative_child_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_p1_bypass_hint_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_p2_bypass_hint_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_p3_bypass_hint_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_dis_col_sel_0; // @[issue-slot.scala:49:7]
wire [11:0] io_out_uop_br_mask_0; // @[issue-slot.scala:49:7]
wire [3:0] io_out_uop_br_tag_0; // @[issue-slot.scala:49:7]
wire [3:0] io_out_uop_br_type_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_sfb_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_fence_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_fencei_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_sfence_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_amo_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_eret_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_sys_pc2epc_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_rocc_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_mov_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_ftq_idx_0; // @[issue-slot.scala:49:7]
wire io_out_uop_edge_inst_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_pc_lob_0; // @[issue-slot.scala:49:7]
wire io_out_uop_taken_0; // @[issue-slot.scala:49:7]
wire io_out_uop_imm_rename_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_imm_sel_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_pimm_0; // @[issue-slot.scala:49:7]
wire [19:0] io_out_uop_imm_packed_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_op1_sel_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_op2_sel_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_rob_idx_0; // @[issue-slot.scala:49:7]
wire [3:0] io_out_uop_ldq_idx_0; // @[issue-slot.scala:49:7]
wire [3:0] io_out_uop_stq_idx_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_rxq_idx_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_pdst_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_prs1_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_prs2_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_prs3_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_ppred_0; // @[issue-slot.scala:49:7]
wire io_out_uop_prs1_busy_0; // @[issue-slot.scala:49:7]
wire io_out_uop_prs2_busy_0; // @[issue-slot.scala:49:7]
wire io_out_uop_prs3_busy_0; // @[issue-slot.scala:49:7]
wire io_out_uop_ppred_busy_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_stale_pdst_0; // @[issue-slot.scala:49:7]
wire io_out_uop_exception_0; // @[issue-slot.scala:49:7]
wire [63:0] io_out_uop_exc_cause_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_mem_cmd_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_mem_size_0; // @[issue-slot.scala:49:7]
wire io_out_uop_mem_signed_0; // @[issue-slot.scala:49:7]
wire io_out_uop_uses_ldq_0; // @[issue-slot.scala:49:7]
wire io_out_uop_uses_stq_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_unique_0; // @[issue-slot.scala:49:7]
wire io_out_uop_flush_on_commit_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_csr_cmd_0; // @[issue-slot.scala:49:7]
wire io_out_uop_ldst_is_rs1_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_ldst_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_lrs1_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_lrs2_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_lrs3_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_dst_rtype_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_lrs1_rtype_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_lrs2_rtype_0; // @[issue-slot.scala:49:7]
wire io_out_uop_frs3_en_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fcn_dw_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_fcn_op_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_val_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_fp_rm_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_fp_typ_0; // @[issue-slot.scala:49:7]
wire io_out_uop_xcpt_pf_if_0; // @[issue-slot.scala:49:7]
wire io_out_uop_xcpt_ae_if_0; // @[issue-slot.scala:49:7]
wire io_out_uop_xcpt_ma_if_0; // @[issue-slot.scala:49:7]
wire io_out_uop_bp_debug_if_0; // @[issue-slot.scala:49:7]
wire io_out_uop_bp_xcpt_if_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_debug_fsrc_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_debug_tsrc_0; // @[issue-slot.scala:49:7]
wire io_valid_0; // @[issue-slot.scala:49:7]
wire io_will_be_valid_0; // @[issue-slot.scala:49:7]
wire io_request_0; // @[issue-slot.scala:49:7]
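// Slot state: the valid bit and the registered micro-op (slot_uop_*). Each field drives
// the matching io_iss_uop_* output and a next_uop_out_* alias (util.scala:104) that
// presumably feeds the next_uop computation.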
reg slot_valid; // @[issue-slot.scala:55:27]
assign io_valid_0 = slot_valid; // @[issue-slot.scala:49:7, :55:27]
reg [31:0] slot_uop_inst; // @[issue-slot.scala:56:21]
assign io_iss_uop_inst_0 = slot_uop_inst; // @[issue-slot.scala:49:7, :56:21]
wire [31:0] next_uop_out_inst = slot_uop_inst; // @[util.scala:104:23]
reg [31:0] slot_uop_debug_inst; // @[issue-slot.scala:56:21]
assign io_iss_uop_debug_inst_0 = slot_uop_debug_inst; // @[issue-slot.scala:49:7, :56:21]
wire [31:0] next_uop_out_debug_inst = slot_uop_debug_inst; // @[util.scala:104:23]
reg slot_uop_is_rvc; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_rvc_0 = slot_uop_is_rvc; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_rvc = slot_uop_is_rvc; // @[util.scala:104:23]
reg [39:0] slot_uop_debug_pc; // @[issue-slot.scala:56:21]
assign io_iss_uop_debug_pc_0 = slot_uop_debug_pc; // @[issue-slot.scala:49:7, :56:21]
wire [39:0] next_uop_out_debug_pc = slot_uop_debug_pc; // @[util.scala:104:23]
reg slot_uop_iq_type_0; // @[issue-slot.scala:56:21]
assign io_iss_uop_iq_type_0_0 = slot_uop_iq_type_0; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iq_type_0 = slot_uop_iq_type_0; // @[util.scala:104:23]
reg slot_uop_iq_type_1; // @[issue-slot.scala:56:21]
assign io_iss_uop_iq_type_1_0 = slot_uop_iq_type_1; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iq_type_1 = slot_uop_iq_type_1; // @[util.scala:104:23]
reg slot_uop_iq_type_2; // @[issue-slot.scala:56:21]
assign io_iss_uop_iq_type_2_0 = slot_uop_iq_type_2; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iq_type_2 = slot_uop_iq_type_2; // @[util.scala:104:23]
reg slot_uop_iq_type_3; // @[issue-slot.scala:56:21]
assign io_iss_uop_iq_type_3_0 = slot_uop_iq_type_3; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iq_type_3 = slot_uop_iq_type_3; // @[util.scala:104:23]
reg slot_uop_fu_code_0; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_0_0 = slot_uop_fu_code_0; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_0 = slot_uop_fu_code_0; // @[util.scala:104:23]
reg slot_uop_fu_code_1; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_1_0 = slot_uop_fu_code_1; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_1 = slot_uop_fu_code_1; // @[util.scala:104:23]
reg slot_uop_fu_code_2; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_2_0 = slot_uop_fu_code_2; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_2 = slot_uop_fu_code_2; // @[util.scala:104:23]
reg slot_uop_fu_code_3; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_3_0 = slot_uop_fu_code_3; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_3 = slot_uop_fu_code_3; // @[util.scala:104:23]
reg slot_uop_fu_code_4; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_4_0 = slot_uop_fu_code_4; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_4 = slot_uop_fu_code_4; // @[util.scala:104:23]
reg slot_uop_fu_code_5; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_5_0 = slot_uop_fu_code_5; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_5 = slot_uop_fu_code_5; // @[util.scala:104:23]
reg slot_uop_fu_code_6; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_6_0 = slot_uop_fu_code_6; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_6 = slot_uop_fu_code_6; // @[util.scala:104:23]
reg slot_uop_fu_code_7; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_7_0 = slot_uop_fu_code_7; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_7 = slot_uop_fu_code_7; // @[util.scala:104:23]
reg slot_uop_fu_code_8; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_8_0 = slot_uop_fu_code_8; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_8 = slot_uop_fu_code_8; // @[util.scala:104:23]
reg slot_uop_fu_code_9; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_9_0 = slot_uop_fu_code_9; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_9 = slot_uop_fu_code_9; // @[util.scala:104:23]
reg slot_uop_iw_issued; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_issued_0 = slot_uop_iw_issued; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iw_issued = slot_uop_iw_issued; // @[util.scala:104:23]
reg [1:0] slot_uop_iw_p1_speculative_child; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p1_speculative_child_0 = slot_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_iw_p1_speculative_child = slot_uop_iw_p1_speculative_child; // @[util.scala:104:23]
reg [1:0] slot_uop_iw_p2_speculative_child; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p2_speculative_child_0 = slot_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_iw_p2_speculative_child = slot_uop_iw_p2_speculative_child; // @[util.scala:104:23]
reg slot_uop_iw_p1_bypass_hint; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p1_bypass_hint_0 = slot_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iw_p1_bypass_hint = slot_uop_iw_p1_bypass_hint; // @[util.scala:104:23]
reg slot_uop_iw_p2_bypass_hint; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p2_bypass_hint_0 = slot_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iw_p2_bypass_hint = slot_uop_iw_p2_bypass_hint; // @[util.scala:104:23]
reg slot_uop_iw_p3_bypass_hint; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p3_bypass_hint_0 = slot_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iw_p3_bypass_hint = slot_uop_iw_p3_bypass_hint; // @[util.scala:104:23]
reg [1:0] slot_uop_dis_col_sel; // @[issue-slot.scala:56:21]
assign io_iss_uop_dis_col_sel_0 = slot_uop_dis_col_sel; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_dis_col_sel = slot_uop_dis_col_sel; // @[util.scala:104:23]
reg [11:0] slot_uop_br_mask; // @[issue-slot.scala:56:21]
assign io_iss_uop_br_mask_0 = slot_uop_br_mask; // @[issue-slot.scala:49:7, :56:21]
reg [3:0] slot_uop_br_tag; // @[issue-slot.scala:56:21]
assign io_iss_uop_br_tag_0 = slot_uop_br_tag; // @[issue-slot.scala:49:7, :56:21]
wire [3:0] next_uop_out_br_tag = slot_uop_br_tag; // @[util.scala:104:23]
reg [3:0] slot_uop_br_type; // @[issue-slot.scala:56:21]
assign io_iss_uop_br_type_0 = slot_uop_br_type; // @[issue-slot.scala:49:7, :56:21]
wire [3:0] next_uop_out_br_type = slot_uop_br_type; // @[util.scala:104:23]
reg slot_uop_is_sfb; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_sfb_0 = slot_uop_is_sfb; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_sfb = slot_uop_is_sfb; // @[util.scala:104:23]
reg slot_uop_is_fence; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_fence_0 = slot_uop_is_fence; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_fence = slot_uop_is_fence; // @[util.scala:104:23]
reg slot_uop_is_fencei; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_fencei_0 = slot_uop_is_fencei; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_fencei = slot_uop_is_fencei; // @[util.scala:104:23]
reg slot_uop_is_sfence; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_sfence_0 = slot_uop_is_sfence; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_sfence = slot_uop_is_sfence; // @[util.scala:104:23]
reg slot_uop_is_amo; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_amo_0 = slot_uop_is_amo; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_amo = slot_uop_is_amo; // @[util.scala:104:23]
reg slot_uop_is_eret; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_eret_0 = slot_uop_is_eret; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_eret = slot_uop_is_eret; // @[util.scala:104:23]
reg slot_uop_is_sys_pc2epc; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_sys_pc2epc_0 = slot_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_sys_pc2epc = slot_uop_is_sys_pc2epc; // @[util.scala:104:23]
reg slot_uop_is_rocc; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_rocc_0 = slot_uop_is_rocc; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_rocc = slot_uop_is_rocc; // @[util.scala:104:23]
reg slot_uop_is_mov; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_mov_0 = slot_uop_is_mov; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_mov = slot_uop_is_mov; // @[util.scala:104:23]
reg [4:0] slot_uop_ftq_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_ftq_idx_0 = slot_uop_ftq_idx; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_ftq_idx = slot_uop_ftq_idx; // @[util.scala:104:23]
reg slot_uop_edge_inst; // @[issue-slot.scala:56:21]
assign io_iss_uop_edge_inst_0 = slot_uop_edge_inst; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_edge_inst = slot_uop_edge_inst; // @[util.scala:104:23]
reg [5:0] slot_uop_pc_lob; // @[issue-slot.scala:56:21]
assign io_iss_uop_pc_lob_0 = slot_uop_pc_lob; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_pc_lob = slot_uop_pc_lob; // @[util.scala:104:23]
reg slot_uop_taken; // @[issue-slot.scala:56:21]
assign io_iss_uop_taken_0 = slot_uop_taken; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_taken = slot_uop_taken; // @[util.scala:104:23]
reg slot_uop_imm_rename; // @[issue-slot.scala:56:21]
assign io_iss_uop_imm_rename_0 = slot_uop_imm_rename; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_imm_rename = slot_uop_imm_rename; // @[util.scala:104:23]
reg [2:0] slot_uop_imm_sel; // @[issue-slot.scala:56:21]
assign io_iss_uop_imm_sel_0 = slot_uop_imm_sel; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_imm_sel = slot_uop_imm_sel; // @[util.scala:104:23]
reg [4:0] slot_uop_pimm; // @[issue-slot.scala:56:21]
assign io_iss_uop_pimm_0 = slot_uop_pimm; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_pimm = slot_uop_pimm; // @[util.scala:104:23]
reg [19:0] slot_uop_imm_packed; // @[issue-slot.scala:56:21]
assign io_iss_uop_imm_packed_0 = slot_uop_imm_packed; // @[issue-slot.scala:49:7, :56:21]
wire [19:0] next_uop_out_imm_packed = slot_uop_imm_packed; // @[util.scala:104:23]
reg [1:0] slot_uop_op1_sel; // @[issue-slot.scala:56:21]
assign io_iss_uop_op1_sel_0 = slot_uop_op1_sel; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_op1_sel = slot_uop_op1_sel; // @[util.scala:104:23]
reg [2:0] slot_uop_op2_sel; // @[issue-slot.scala:56:21]
assign io_iss_uop_op2_sel_0 = slot_uop_op2_sel; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_op2_sel = slot_uop_op2_sel; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_ldst; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_ldst_0 = slot_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_ldst = slot_uop_fp_ctrl_ldst; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_wen; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_wen_0 = slot_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_wen = slot_uop_fp_ctrl_wen; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_ren1; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_ren1_0 = slot_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_ren1 = slot_uop_fp_ctrl_ren1; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_ren2; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_ren2_0 = slot_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_ren2 = slot_uop_fp_ctrl_ren2; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_ren3; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_ren3_0 = slot_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_ren3 = slot_uop_fp_ctrl_ren3; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_swap12; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_swap12_0 = slot_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_swap12 = slot_uop_fp_ctrl_swap12; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_swap23; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_swap23_0 = slot_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_swap23 = slot_uop_fp_ctrl_swap23; // @[util.scala:104:23]
reg [1:0] slot_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_typeTagIn_0 = slot_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_fp_ctrl_typeTagIn = slot_uop_fp_ctrl_typeTagIn; // @[util.scala:104:23]
reg [1:0] slot_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_typeTagOut_0 = slot_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_fp_ctrl_typeTagOut = slot_uop_fp_ctrl_typeTagOut; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_fromint; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_fromint_0 = slot_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_fromint = slot_uop_fp_ctrl_fromint; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_toint; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_toint_0 = slot_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_toint = slot_uop_fp_ctrl_toint; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_fastpipe_0 = slot_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_fastpipe = slot_uop_fp_ctrl_fastpipe; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_fma; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_fma_0 = slot_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_fma = slot_uop_fp_ctrl_fma; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_div; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_div_0 = slot_uop_fp_ctrl_div; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_div = slot_uop_fp_ctrl_div; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_sqrt; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_sqrt_0 = slot_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_sqrt = slot_uop_fp_ctrl_sqrt; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_wflags; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_wflags_0 = slot_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_wflags = slot_uop_fp_ctrl_wflags; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_vec; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_vec_0 = slot_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_vec = slot_uop_fp_ctrl_vec; // @[util.scala:104:23]
reg [5:0] slot_uop_rob_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_rob_idx_0 = slot_uop_rob_idx; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_rob_idx = slot_uop_rob_idx; // @[util.scala:104:23]
reg [3:0] slot_uop_ldq_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_ldq_idx_0 = slot_uop_ldq_idx; // @[issue-slot.scala:49:7, :56:21]
wire [3:0] next_uop_out_ldq_idx = slot_uop_ldq_idx; // @[util.scala:104:23]
reg [3:0] slot_uop_stq_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_stq_idx_0 = slot_uop_stq_idx; // @[issue-slot.scala:49:7, :56:21]
wire [3:0] next_uop_out_stq_idx = slot_uop_stq_idx; // @[util.scala:104:23]
reg [1:0] slot_uop_rxq_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_rxq_idx_0 = slot_uop_rxq_idx; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_rxq_idx = slot_uop_rxq_idx; // @[util.scala:104:23]
reg [6:0] slot_uop_pdst; // @[issue-slot.scala:56:21]
assign io_iss_uop_pdst_0 = slot_uop_pdst; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_pdst = slot_uop_pdst; // @[util.scala:104:23]
reg [6:0] slot_uop_prs1; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs1_0 = slot_uop_prs1; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_prs1 = slot_uop_prs1; // @[util.scala:104:23]
reg [6:0] slot_uop_prs2; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs2_0 = slot_uop_prs2; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_prs2 = slot_uop_prs2; // @[util.scala:104:23]
reg [6:0] slot_uop_prs3; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs3_0 = slot_uop_prs3; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_prs3 = slot_uop_prs3; // @[util.scala:104:23]
reg [4:0] slot_uop_ppred; // @[issue-slot.scala:56:21]
assign io_iss_uop_ppred_0 = slot_uop_ppred; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_ppred = slot_uop_ppred; // @[util.scala:104:23]
reg slot_uop_prs1_busy; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs1_busy_0 = slot_uop_prs1_busy; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_prs1_busy = slot_uop_prs1_busy; // @[util.scala:104:23]
reg slot_uop_prs2_busy; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs2_busy_0 = slot_uop_prs2_busy; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_prs2_busy = slot_uop_prs2_busy; // @[util.scala:104:23]
reg slot_uop_prs3_busy; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs3_busy_0 = slot_uop_prs3_busy; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_prs3_busy = slot_uop_prs3_busy; // @[util.scala:104:23]
reg slot_uop_ppred_busy; // @[issue-slot.scala:56:21]
assign io_iss_uop_ppred_busy_0 = slot_uop_ppred_busy; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_ppred_busy = slot_uop_ppred_busy; // @[util.scala:104:23]
wire _iss_ready_T_3 = slot_uop_ppred_busy; // @[issue-slot.scala:56:21, :136:88]
wire _agen_ready_T_2 = slot_uop_ppred_busy; // @[issue-slot.scala:56:21, :137:95]
wire _dgen_ready_T_2 = slot_uop_ppred_busy; // @[issue-slot.scala:56:21, :138:95]
reg [6:0] slot_uop_stale_pdst; // @[issue-slot.scala:56:21]
assign io_iss_uop_stale_pdst_0 = slot_uop_stale_pdst; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_stale_pdst = slot_uop_stale_pdst; // @[util.scala:104:23]
reg slot_uop_exception; // @[issue-slot.scala:56:21]
assign io_iss_uop_exception_0 = slot_uop_exception; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_exception = slot_uop_exception; // @[util.scala:104:23]
reg [63:0] slot_uop_exc_cause; // @[issue-slot.scala:56:21]
assign io_iss_uop_exc_cause_0 = slot_uop_exc_cause; // @[issue-slot.scala:49:7, :56:21]
wire [63:0] next_uop_out_exc_cause = slot_uop_exc_cause; // @[util.scala:104:23]
reg [4:0] slot_uop_mem_cmd; // @[issue-slot.scala:56:21]
assign io_iss_uop_mem_cmd_0 = slot_uop_mem_cmd; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_mem_cmd = slot_uop_mem_cmd; // @[util.scala:104:23]
reg [1:0] slot_uop_mem_size; // @[issue-slot.scala:56:21]
assign io_iss_uop_mem_size_0 = slot_uop_mem_size; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_mem_size = slot_uop_mem_size; // @[util.scala:104:23]
reg slot_uop_mem_signed; // @[issue-slot.scala:56:21]
assign io_iss_uop_mem_signed_0 = slot_uop_mem_signed; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_mem_signed = slot_uop_mem_signed; // @[util.scala:104:23]
reg slot_uop_uses_ldq; // @[issue-slot.scala:56:21]
assign io_iss_uop_uses_ldq_0 = slot_uop_uses_ldq; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_uses_ldq = slot_uop_uses_ldq; // @[util.scala:104:23]
reg slot_uop_uses_stq; // @[issue-slot.scala:56:21]
assign io_iss_uop_uses_stq_0 = slot_uop_uses_stq; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_uses_stq = slot_uop_uses_stq; // @[util.scala:104:23]
reg slot_uop_is_unique; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_unique_0 = slot_uop_is_unique; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_unique = slot_uop_is_unique; // @[util.scala:104:23]
reg slot_uop_flush_on_commit; // @[issue-slot.scala:56:21]
assign io_iss_uop_flush_on_commit_0 = slot_uop_flush_on_commit; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_flush_on_commit = slot_uop_flush_on_commit; // @[util.scala:104:23]
reg [2:0] slot_uop_csr_cmd; // @[issue-slot.scala:56:21]
assign io_iss_uop_csr_cmd_0 = slot_uop_csr_cmd; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_csr_cmd = slot_uop_csr_cmd; // @[util.scala:104:23]
reg slot_uop_ldst_is_rs1; // @[issue-slot.scala:56:21]
assign io_iss_uop_ldst_is_rs1_0 = slot_uop_ldst_is_rs1; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_ldst_is_rs1 = slot_uop_ldst_is_rs1; // @[util.scala:104:23]
reg [5:0] slot_uop_ldst; // @[issue-slot.scala:56:21]
assign io_iss_uop_ldst_0 = slot_uop_ldst; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_ldst = slot_uop_ldst; // @[util.scala:104:23]
reg [5:0] slot_uop_lrs1; // @[issue-slot.scala:56:21]
assign io_iss_uop_lrs1_0 = slot_uop_lrs1; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_lrs1 = slot_uop_lrs1; // @[util.scala:104:23]
reg [5:0] slot_uop_lrs2; // @[issue-slot.scala:56:21]
assign io_iss_uop_lrs2_0 = slot_uop_lrs2; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_lrs2 = slot_uop_lrs2; // @[util.scala:104:23]
reg [5:0] slot_uop_lrs3; // @[issue-slot.scala:56:21]
assign io_iss_uop_lrs3_0 = slot_uop_lrs3; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_lrs3 = slot_uop_lrs3; // @[util.scala:104:23]
reg [1:0] slot_uop_dst_rtype; // @[issue-slot.scala:56:21]
assign io_iss_uop_dst_rtype_0 = slot_uop_dst_rtype; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_dst_rtype = slot_uop_dst_rtype; // @[util.scala:104:23]
reg [1:0] slot_uop_lrs1_rtype; // @[issue-slot.scala:56:21]
assign io_iss_uop_lrs1_rtype_0 = slot_uop_lrs1_rtype; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_lrs1_rtype = slot_uop_lrs1_rtype; // @[util.scala:104:23]
reg [1:0] slot_uop_lrs2_rtype; // @[issue-slot.scala:56:21]
assign io_iss_uop_lrs2_rtype_0 = slot_uop_lrs2_rtype; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_lrs2_rtype = slot_uop_lrs2_rtype; // @[util.scala:104:23]
reg slot_uop_frs3_en; // @[issue-slot.scala:56:21]
assign io_iss_uop_frs3_en_0 = slot_uop_frs3_en; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_frs3_en = slot_uop_frs3_en; // @[util.scala:104:23]
reg slot_uop_fcn_dw; // @[issue-slot.scala:56:21]
assign io_iss_uop_fcn_dw_0 = slot_uop_fcn_dw; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fcn_dw = slot_uop_fcn_dw; // @[util.scala:104:23]
reg [4:0] slot_uop_fcn_op; // @[issue-slot.scala:56:21]
assign io_iss_uop_fcn_op_0 = slot_uop_fcn_op; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_fcn_op = slot_uop_fcn_op; // @[util.scala:104:23]
reg slot_uop_fp_val; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_val_0 = slot_uop_fp_val; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_val = slot_uop_fp_val; // @[util.scala:104:23]
reg [2:0] slot_uop_fp_rm; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_rm_0 = slot_uop_fp_rm; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_fp_rm = slot_uop_fp_rm; // @[util.scala:104:23]
reg [1:0] slot_uop_fp_typ; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_typ_0 = slot_uop_fp_typ; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_fp_typ = slot_uop_fp_typ; // @[util.scala:104:23]
reg slot_uop_xcpt_pf_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_xcpt_pf_if_0 = slot_uop_xcpt_pf_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_xcpt_pf_if = slot_uop_xcpt_pf_if; // @[util.scala:104:23]
reg slot_uop_xcpt_ae_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_xcpt_ae_if_0 = slot_uop_xcpt_ae_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_xcpt_ae_if = slot_uop_xcpt_ae_if; // @[util.scala:104:23]
reg slot_uop_xcpt_ma_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_xcpt_ma_if_0 = slot_uop_xcpt_ma_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_xcpt_ma_if = slot_uop_xcpt_ma_if; // @[util.scala:104:23]
reg slot_uop_bp_debug_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_bp_debug_if_0 = slot_uop_bp_debug_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_bp_debug_if = slot_uop_bp_debug_if; // @[util.scala:104:23]
reg slot_uop_bp_xcpt_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_bp_xcpt_if_0 = slot_uop_bp_xcpt_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_bp_xcpt_if = slot_uop_bp_xcpt_if; // @[util.scala:104:23]
reg [2:0] slot_uop_debug_fsrc; // @[issue-slot.scala:56:21]
assign io_iss_uop_debug_fsrc_0 = slot_uop_debug_fsrc; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_debug_fsrc = slot_uop_debug_fsrc; // @[util.scala:104:23]
reg [2:0] slot_uop_debug_tsrc; // @[issue-slot.scala:56:21]
assign io_iss_uop_debug_tsrc_0 = slot_uop_debug_tsrc; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_debug_tsrc = slot_uop_debug_tsrc; // @[util.scala:104:23]
wire next_valid; // @[issue-slot.scala:58:28]
assign next_uop_inst = next_uop_out_inst; // @[util.scala:104:23]
assign next_uop_debug_inst = next_uop_out_debug_inst; // @[util.scala:104:23]
assign next_uop_is_rvc = next_uop_out_is_rvc; // @[util.scala:104:23]
assign next_uop_debug_pc = next_uop_out_debug_pc; // @[util.scala:104:23]
assign next_uop_iq_type_0 = next_uop_out_iq_type_0; // @[util.scala:104:23]
assign next_uop_iq_type_1 = next_uop_out_iq_type_1; // @[util.scala:104:23]
assign next_uop_iq_type_2 = next_uop_out_iq_type_2; // @[util.scala:104:23]
assign next_uop_iq_type_3 = next_uop_out_iq_type_3; // @[util.scala:104:23]
assign next_uop_fu_code_0 = next_uop_out_fu_code_0; // @[util.scala:104:23]
assign next_uop_fu_code_1 = next_uop_out_fu_code_1; // @[util.scala:104:23]
assign next_uop_fu_code_2 = next_uop_out_fu_code_2; // @[util.scala:104:23]
assign next_uop_fu_code_3 = next_uop_out_fu_code_3; // @[util.scala:104:23]
assign next_uop_fu_code_4 = next_uop_out_fu_code_4; // @[util.scala:104:23]
assign next_uop_fu_code_5 = next_uop_out_fu_code_5; // @[util.scala:104:23]
assign next_uop_fu_code_6 = next_uop_out_fu_code_6; // @[util.scala:104:23]
assign next_uop_fu_code_7 = next_uop_out_fu_code_7; // @[util.scala:104:23]
assign next_uop_fu_code_8 = next_uop_out_fu_code_8; // @[util.scala:104:23]
assign next_uop_fu_code_9 = next_uop_out_fu_code_9; // @[util.scala:104:23]
wire [11:0] _next_uop_out_br_mask_T_1; // @[util.scala:93:25]
assign next_uop_dis_col_sel = next_uop_out_dis_col_sel; // @[util.scala:104:23]
assign next_uop_br_mask = next_uop_out_br_mask; // @[util.scala:104:23]
assign next_uop_br_tag = next_uop_out_br_tag; // @[util.scala:104:23]
assign next_uop_br_type = next_uop_out_br_type; // @[util.scala:104:23]
assign next_uop_is_sfb = next_uop_out_is_sfb; // @[util.scala:104:23]
assign next_uop_is_fence = next_uop_out_is_fence; // @[util.scala:104:23]
assign next_uop_is_fencei = next_uop_out_is_fencei; // @[util.scala:104:23]
assign next_uop_is_sfence = next_uop_out_is_sfence; // @[util.scala:104:23]
assign next_uop_is_amo = next_uop_out_is_amo; // @[util.scala:104:23]
assign next_uop_is_eret = next_uop_out_is_eret; // @[util.scala:104:23]
assign next_uop_is_sys_pc2epc = next_uop_out_is_sys_pc2epc; // @[util.scala:104:23]
assign next_uop_is_rocc = next_uop_out_is_rocc; // @[util.scala:104:23]
assign next_uop_is_mov = next_uop_out_is_mov; // @[util.scala:104:23]
assign next_uop_ftq_idx = next_uop_out_ftq_idx; // @[util.scala:104:23]
assign next_uop_edge_inst = next_uop_out_edge_inst; // @[util.scala:104:23]
assign next_uop_pc_lob = next_uop_out_pc_lob; // @[util.scala:104:23]
assign next_uop_taken = next_uop_out_taken; // @[util.scala:104:23]
assign next_uop_imm_rename = next_uop_out_imm_rename; // @[util.scala:104:23]
assign next_uop_imm_sel = next_uop_out_imm_sel; // @[util.scala:104:23]
assign next_uop_pimm = next_uop_out_pimm; // @[util.scala:104:23]
assign next_uop_imm_packed = next_uop_out_imm_packed; // @[util.scala:104:23]
assign next_uop_op1_sel = next_uop_out_op1_sel; // @[util.scala:104:23]
assign next_uop_op2_sel = next_uop_out_op2_sel; // @[util.scala:104:23]
assign next_uop_fp_ctrl_ldst = next_uop_out_fp_ctrl_ldst; // @[util.scala:104:23]
assign next_uop_fp_ctrl_wen = next_uop_out_fp_ctrl_wen; // @[util.scala:104:23]
assign next_uop_fp_ctrl_ren1 = next_uop_out_fp_ctrl_ren1; // @[util.scala:104:23]
assign next_uop_fp_ctrl_ren2 = next_uop_out_fp_ctrl_ren2; // @[util.scala:104:23]
assign next_uop_fp_ctrl_ren3 = next_uop_out_fp_ctrl_ren3; // @[util.scala:104:23]
assign next_uop_fp_ctrl_swap12 = next_uop_out_fp_ctrl_swap12; // @[util.scala:104:23]
assign next_uop_fp_ctrl_swap23 = next_uop_out_fp_ctrl_swap23; // @[util.scala:104:23]
assign next_uop_fp_ctrl_typeTagIn = next_uop_out_fp_ctrl_typeTagIn; // @[util.scala:104:23]
assign next_uop_fp_ctrl_typeTagOut = next_uop_out_fp_ctrl_typeTagOut; // @[util.scala:104:23]
assign next_uop_fp_ctrl_fromint = next_uop_out_fp_ctrl_fromint; // @[util.scala:104:23]
assign next_uop_fp_ctrl_toint = next_uop_out_fp_ctrl_toint; // @[util.scala:104:23]
assign next_uop_fp_ctrl_fastpipe = next_uop_out_fp_ctrl_fastpipe; // @[util.scala:104:23]
assign next_uop_fp_ctrl_fma = next_uop_out_fp_ctrl_fma; // @[util.scala:104:23]
assign next_uop_fp_ctrl_div = next_uop_out_fp_ctrl_div; // @[util.scala:104:23]
assign next_uop_fp_ctrl_sqrt = next_uop_out_fp_ctrl_sqrt; // @[util.scala:104:23]
assign next_uop_fp_ctrl_wflags = next_uop_out_fp_ctrl_wflags; // @[util.scala:104:23]
assign next_uop_fp_ctrl_vec = next_uop_out_fp_ctrl_vec; // @[util.scala:104:23]
assign next_uop_rob_idx = next_uop_out_rob_idx; // @[util.scala:104:23]
assign next_uop_ldq_idx = next_uop_out_ldq_idx; // @[util.scala:104:23]
assign next_uop_stq_idx = next_uop_out_stq_idx; // @[util.scala:104:23]
assign next_uop_rxq_idx = next_uop_out_rxq_idx; // @[util.scala:104:23]
assign next_uop_pdst = next_uop_out_pdst; // @[util.scala:104:23]
assign next_uop_prs1 = next_uop_out_prs1; // @[util.scala:104:23]
assign next_uop_prs2 = next_uop_out_prs2; // @[util.scala:104:23]
assign next_uop_prs3 = next_uop_out_prs3; // @[util.scala:104:23]
assign next_uop_ppred = next_uop_out_ppred; // @[util.scala:104:23]
assign next_uop_ppred_busy = next_uop_out_ppred_busy; // @[util.scala:104:23]
assign next_uop_stale_pdst = next_uop_out_stale_pdst; // @[util.scala:104:23]
assign next_uop_exception = next_uop_out_exception; // @[util.scala:104:23]
assign next_uop_exc_cause = next_uop_out_exc_cause; // @[util.scala:104:23]
assign next_uop_mem_cmd = next_uop_out_mem_cmd; // @[util.scala:104:23]
assign next_uop_mem_size = next_uop_out_mem_size; // @[util.scala:104:23]
assign next_uop_mem_signed = next_uop_out_mem_signed; // @[util.scala:104:23]
assign next_uop_uses_ldq = next_uop_out_uses_ldq; // @[util.scala:104:23]
assign next_uop_uses_stq = next_uop_out_uses_stq; // @[util.scala:104:23]
assign next_uop_is_unique = next_uop_out_is_unique; // @[util.scala:104:23]
assign next_uop_flush_on_commit = next_uop_out_flush_on_commit; // @[util.scala:104:23]
assign next_uop_csr_cmd = next_uop_out_csr_cmd; // @[util.scala:104:23]
assign next_uop_ldst_is_rs1 = next_uop_out_ldst_is_rs1; // @[util.scala:104:23]
assign next_uop_ldst = next_uop_out_ldst; // @[util.scala:104:23]
assign next_uop_lrs1 = next_uop_out_lrs1; // @[util.scala:104:23]
assign next_uop_lrs2 = next_uop_out_lrs2; // @[util.scala:104:23]
assign next_uop_lrs3 = next_uop_out_lrs3; // @[util.scala:104:23]
assign next_uop_dst_rtype = next_uop_out_dst_rtype; // @[util.scala:104:23]
assign next_uop_lrs1_rtype = next_uop_out_lrs1_rtype; // @[util.scala:104:23]
assign next_uop_lrs2_rtype = next_uop_out_lrs2_rtype; // @[util.scala:104:23]
assign next_uop_frs3_en = next_uop_out_frs3_en; // @[util.scala:104:23]
assign next_uop_fcn_dw = next_uop_out_fcn_dw; // @[util.scala:104:23]
assign next_uop_fcn_op = next_uop_out_fcn_op; // @[util.scala:104:23]
assign next_uop_fp_val = next_uop_out_fp_val; // @[util.scala:104:23]
assign next_uop_fp_rm = next_uop_out_fp_rm; // @[util.scala:104:23]
assign next_uop_fp_typ = next_uop_out_fp_typ; // @[util.scala:104:23]
assign next_uop_xcpt_pf_if = next_uop_out_xcpt_pf_if; // @[util.scala:104:23]
assign next_uop_xcpt_ae_if = next_uop_out_xcpt_ae_if; // @[util.scala:104:23]
assign next_uop_xcpt_ma_if = next_uop_out_xcpt_ma_if; // @[util.scala:104:23]
assign next_uop_bp_debug_if = next_uop_out_bp_debug_if; // @[util.scala:104:23]
assign next_uop_bp_xcpt_if = next_uop_out_bp_xcpt_if; // @[util.scala:104:23]
assign next_uop_debug_fsrc = next_uop_out_debug_fsrc; // @[util.scala:104:23]
assign next_uop_debug_tsrc = next_uop_out_debug_tsrc; // @[util.scala:104:23]
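// Drop branch tags that resolved this cycle from the uop's carried branch mask (br_mask & ~b1.resolve_mask).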
wire [11:0] _next_uop_out_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:93:27]
assign _next_uop_out_br_mask_T_1 = slot_uop_br_mask & _next_uop_out_br_mask_T; // @[util.scala:93:{25,27}]
assign next_uop_out_br_mask = _next_uop_out_br_mask_T_1; // @[util.scala:93:25, :104:23]
assign io_out_uop_inst_0 = next_uop_inst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_debug_inst_0 = next_uop_debug_inst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_rvc_0 = next_uop_is_rvc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_debug_pc_0 = next_uop_debug_pc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iq_type_0_0 = next_uop_iq_type_0; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iq_type_1_0 = next_uop_iq_type_1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iq_type_2_0 = next_uop_iq_type_2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iq_type_3_0 = next_uop_iq_type_3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_0_0 = next_uop_fu_code_0; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_1_0 = next_uop_fu_code_1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_2_0 = next_uop_fu_code_2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_3_0 = next_uop_fu_code_3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_4_0 = next_uop_fu_code_4; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_5_0 = next_uop_fu_code_5; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_6_0 = next_uop_fu_code_6; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_7_0 = next_uop_fu_code_7; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_8_0 = next_uop_fu_code_8; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_9_0 = next_uop_fu_code_9; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_issued_0 = next_uop_iw_issued; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_p1_speculative_child_0 = next_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_p2_speculative_child_0 = next_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_p1_bypass_hint_0 = next_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_p2_bypass_hint_0 = next_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_p3_bypass_hint_0 = next_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_dis_col_sel_0 = next_uop_dis_col_sel; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_br_mask_0 = next_uop_br_mask; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_br_tag_0 = next_uop_br_tag; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_br_type_0 = next_uop_br_type; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_sfb_0 = next_uop_is_sfb; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_fence_0 = next_uop_is_fence; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_fencei_0 = next_uop_is_fencei; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_sfence_0 = next_uop_is_sfence; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_amo_0 = next_uop_is_amo; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_eret_0 = next_uop_is_eret; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_sys_pc2epc_0 = next_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_rocc_0 = next_uop_is_rocc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_mov_0 = next_uop_is_mov; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ftq_idx_0 = next_uop_ftq_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_edge_inst_0 = next_uop_edge_inst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_pc_lob_0 = next_uop_pc_lob; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_taken_0 = next_uop_taken; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_imm_rename_0 = next_uop_imm_rename; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_imm_sel_0 = next_uop_imm_sel; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_pimm_0 = next_uop_pimm; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_imm_packed_0 = next_uop_imm_packed; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_op1_sel_0 = next_uop_op1_sel; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_op2_sel_0 = next_uop_op2_sel; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_ldst_0 = next_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_wen_0 = next_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_ren1_0 = next_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_ren2_0 = next_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_ren3_0 = next_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_swap12_0 = next_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_swap23_0 = next_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_typeTagIn_0 = next_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_typeTagOut_0 = next_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_fromint_0 = next_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_toint_0 = next_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_fastpipe_0 = next_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_fma_0 = next_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_div_0 = next_uop_fp_ctrl_div; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_sqrt_0 = next_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_wflags_0 = next_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_vec_0 = next_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_rob_idx_0 = next_uop_rob_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ldq_idx_0 = next_uop_ldq_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_stq_idx_0 = next_uop_stq_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_rxq_idx_0 = next_uop_rxq_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_pdst_0 = next_uop_pdst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs1_0 = next_uop_prs1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs2_0 = next_uop_prs2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs3_0 = next_uop_prs3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ppred_0 = next_uop_ppred; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs1_busy_0 = next_uop_prs1_busy; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs2_busy_0 = next_uop_prs2_busy; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs3_busy_0 = next_uop_prs3_busy; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ppred_busy_0 = next_uop_ppred_busy; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_stale_pdst_0 = next_uop_stale_pdst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_exception_0 = next_uop_exception; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_exc_cause_0 = next_uop_exc_cause; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_mem_cmd_0 = next_uop_mem_cmd; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_mem_size_0 = next_uop_mem_size; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_mem_signed_0 = next_uop_mem_signed; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_uses_ldq_0 = next_uop_uses_ldq; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_uses_stq_0 = next_uop_uses_stq; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_unique_0 = next_uop_is_unique; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_flush_on_commit_0 = next_uop_flush_on_commit; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_csr_cmd_0 = next_uop_csr_cmd; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ldst_is_rs1_0 = next_uop_ldst_is_rs1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ldst_0 = next_uop_ldst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs1_0 = next_uop_lrs1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs2_0 = next_uop_lrs2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs3_0 = next_uop_lrs3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_dst_rtype_0 = next_uop_dst_rtype; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs1_rtype_0 = next_uop_lrs1_rtype; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs2_rtype_0 = next_uop_lrs2_rtype; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_frs3_en_0 = next_uop_frs3_en; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fcn_dw_0 = next_uop_fcn_dw; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fcn_op_0 = next_uop_fcn_op; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_val_0 = next_uop_fp_val; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_rm_0 = next_uop_fp_rm; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_typ_0 = next_uop_fp_typ; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_xcpt_pf_if_0 = next_uop_xcpt_pf_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_xcpt_ae_if_0 = next_uop_xcpt_ae_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_xcpt_ma_if_0 = next_uop_xcpt_ma_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_bp_debug_if_0 = next_uop_bp_debug_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_bp_xcpt_if_0 = next_uop_bp_xcpt_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_debug_fsrc_0 = next_uop_debug_fsrc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_debug_tsrc_0 = next_uop_debug_tsrc; // @[issue-slot.scala:49:7, :59:28]
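// The slot's uop is killed when its branch mask overlaps this cycle's mispredict mask, or when io_kill flushes the queue.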
wire [11:0] _killed_T = io_brupdate_b1_mispredict_mask_0 & slot_uop_br_mask; // @[util.scala:126:51]
wire _killed_T_1 = |_killed_T; // @[util.scala:126:{51,59}]
wire killed = _killed_T_1 | io_kill_0; // @[util.scala:61:61, :126:59]
wire _io_will_be_valid_T = ~killed; // @[util.scala:61:61]
assign _io_will_be_valid_T_1 = next_valid & _io_will_be_valid_T; // @[issue-slot.scala:58:28, :65:{34,37}]
assign io_will_be_valid_0 = _io_will_be_valid_T_1; // @[issue-slot.scala:49:7, :65:34]
wire _slot_valid_T = ~killed; // @[util.scala:61:61]
wire _slot_valid_T_1 = next_valid & _slot_valid_T; // @[issue-slot.scala:58:28, :74:{30,33}] |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param inputType Data type of the A inputs
* @param outputType Data type of the B and D inputs and of the propagated outputs
* @param accType Data type of the internal accumulators

*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
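// Note: the rounding shift is applied only on the cycle where the propagate select flips, so a locally
// accumulated result is scaled exactly once as it leaves this PE; values merely passing through later are not re-shifted.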
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
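// e.g. Float(expWidth = 8, sigWidth = 24) matches the IEEE-754 binary32 layout, giving bias = (1 << 7) - 1 = 127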
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
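// e.g. with this rounding, (14.U >> 2.U) yields 4 (3.5 rounds up) while (13.U >> 2.U) yields 3 (3.25 rounds down)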
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
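// e.g. for an 8-bit target type, sat = (1 << 7) - 1 = 127 (the signed maximum of the target width)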
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
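// e.g. for an 8-bit target type this clips the result to the range [-128, 127]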
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point sqrt unit, but we should use an integer sqrt unit instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
// Instantiate the hardfloat sqrt unit
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
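// e.g. for Float(8, 24) and u = 3, shift_exp = 127 - 3 = 124, and the constructed constant encodes 2^(124-127) = 0.125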
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module MacUnit_168( // @[PE.scala:14:7]
input clock, // @[PE.scala:14:7]
input reset, // @[PE.scala:14:7]
input [7:0] io_in_a, // @[PE.scala:16:14]
input [7:0] io_in_b, // @[PE.scala:16:14]
input [31:0] io_in_c, // @[PE.scala:16:14]
output [19:0] io_out_d // @[PE.scala:16:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7]
wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7]
wire [31:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
wire [19:0] io_out_d_0; // @[PE.scala:14:7]
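// Sign-extend the 8-bit operands and multiply, then sign-extend the 16-bit product and add the 32-bit accumulator input; the low 20 bits of the sum drive out_d.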
wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
wire [32:0] _io_out_d_T_1 = {{17{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[31], io_in_c_0}; // @[PE.scala:14:7]
wire [31:0] _io_out_d_T_2 = _io_out_d_T_1[31:0]; // @[Arithmetic.scala:93:54]
wire [31:0] _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
assign io_out_d_0 = _io_out_d_T_3[19:0]; // @[PE.scala:14:7, :23:12]
assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
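// Usage sketch (with hypothetical `producer`/`consumer` diplomatic nodes): a buffer is typically spliced into the
// graph as `consumer.node := TLBuffer() := producer.node`, while chainNode(depth) composes `depth` back-to-back
// buffers into a single node.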
}
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module
    * and its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
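// A minimal sketch of the childClock/childReset pattern described above, where a
// LazyRawModuleImp drives the recommended wires from explicit ports; the class and
// port names are illustrative assumptions, not definitions from this file.
//
//   class ExternallyClockedDomain(implicit p: Parameters) extends LazyModule {
//     lazy val module = new LazyRawModuleImp(this) {
//       override def provideImplicitClockToLazyChildren = true
//       val clk = IO(Input(Clock()))
//       val rst = IO(Input(Bool()))
//       childClock := clk // lazy children elaborate under this clock
//       childReset := rst // and this reset
//     }
//   }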
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
  * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]];
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
  * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
  *   Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
  *   Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
  *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
  *   Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
  *   It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
  *   Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
  *   describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
  *   Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
  *   Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
  *   interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
  *   - line `─`: the source is processed by a function and the result is passed on to others
  *   - Arrow `→`: the target of the arrow is generated by the source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
  /** Debug info of node bindings. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
  /** Debug info of port connections. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
  /** Debug info of parameter propagation. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
    * Given counts of known inward and outward bindings and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
    /** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
    * connections which need to be resolved to determine how many actual edges they correspond to. We also
    * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
    * edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
      // Resolving the stars is delegated to the node subclass, which implements the algorithm in resolveStar.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
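  // A small worked example of the bookkeeping above (values are illustrative): if this
  // node has three inward bindings of kinds (BIND_ONCE, BIND_STAR, BIND_ONCE) and
  // resolveStar returns iStar = 2, the per-binding edge counts are (1, 2, 1), iSum is
  // (0, 1, 3, 4), and iPortMapping becomes ((0,1), (1,3), (3,4)) -- the star binding
  // claims input edges 1 and 2. The outward side is handled symmetrically.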
  /** Sequence of outward ports.
    *
    * This should be called after all star bindings are resolved.
    *
    * Each element is: `j` Port index of this binding in the [[iPortMapping]] of the [[InwardNode]] on the other side
    * of the binding. `n` Instance of that inward node. `p` View of [[Parameters]] where this connection was made. `s`
    * Source info where this connection was made in the source code.
    */
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  /** Sequence of inward ports.
    *
    * This should be called after all star bindings are resolved.
    *
    * `j` Port index of this binding in the [[oPortMapping]] of the [[OutwardNode]] on the other side of the binding.
    * `n` Instance of that outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]]
    * where this connection was made in the source code.
    */
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
    * If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: Unconnected forwarded diplomatic signals are tied to DontCare for compatibility;
    // in the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: Unconnected forwarded diplomatic signals are tied to DontCare for compatibility;
    // in the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
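  // A minimal sketch of how `in`/`out` are typically consumed from a LazyModuleImp once
  // instantiation has begun; the node and value names below are assumed placeholders.
  //
  //   lazy val module = new LazyModuleImp(this) {
  //     val (tlIn, edgeIn)   = node.in(0)  // negotiated bundle plus inward edge parameters
  //     val (tlOut, edgeOut) = node.out(0) // negotiated bundle plus outward edge parameters
  //     tlOut <> tlIn                      // e.g. a simple pass-through adapter
  //   }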
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
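  // For example, elaborating `x :*= y` calls x.bind(y, BIND_STAR): x records an inward
  // BIND_STAR binding on y while y records the mirrored outward BIND_QUERY binding on x,
  // so the edge count is resolved on x's side (via its resolveStar) and queried from y's.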
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
| module TLBuffer_a29d64s7k1z4u( // @[Buffer.scala:40:9]
input clock, // @[Buffer.scala:40:9]
input reset, // @[Buffer.scala:40:9]
output auto_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [6:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [28:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [6:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [6:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [28:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [6:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_corrupt // @[LazyModuleImp.scala:107:25]
);
wire auto_in_a_valid_0 = auto_in_a_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_opcode_0 = auto_in_a_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_param_0 = auto_in_a_bits_param; // @[Buffer.scala:40:9]
wire [3:0] auto_in_a_bits_size_0 = auto_in_a_bits_size; // @[Buffer.scala:40:9]
wire [6:0] auto_in_a_bits_source_0 = auto_in_a_bits_source; // @[Buffer.scala:40:9]
wire [28:0] auto_in_a_bits_address_0 = auto_in_a_bits_address; // @[Buffer.scala:40:9]
wire [7:0] auto_in_a_bits_mask_0 = auto_in_a_bits_mask; // @[Buffer.scala:40:9]
wire [63:0] auto_in_a_bits_data_0 = auto_in_a_bits_data; // @[Buffer.scala:40:9]
wire auto_in_a_bits_corrupt_0 = auto_in_a_bits_corrupt; // @[Buffer.scala:40:9]
wire auto_in_d_ready_0 = auto_in_d_ready; // @[Buffer.scala:40:9]
wire auto_out_a_ready_0 = auto_out_a_ready; // @[Buffer.scala:40:9]
wire auto_out_d_valid_0 = auto_out_d_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_out_d_bits_opcode_0 = auto_out_d_bits_opcode; // @[Buffer.scala:40:9]
wire [1:0] auto_out_d_bits_param_0 = auto_out_d_bits_param; // @[Buffer.scala:40:9]
wire [3:0] auto_out_d_bits_size_0 = auto_out_d_bits_size; // @[Buffer.scala:40:9]
wire [6:0] auto_out_d_bits_source_0 = auto_out_d_bits_source; // @[Buffer.scala:40:9]
wire auto_out_d_bits_sink_0 = auto_out_d_bits_sink; // @[Buffer.scala:40:9]
wire auto_out_d_bits_denied_0 = auto_out_d_bits_denied; // @[Buffer.scala:40:9]
wire [63:0] auto_out_d_bits_data_0 = auto_out_d_bits_data; // @[Buffer.scala:40:9]
wire auto_out_d_bits_corrupt_0 = auto_out_d_bits_corrupt; // @[Buffer.scala:40:9]
wire nodeIn_a_ready; // @[MixedNode.scala:551:17]
wire nodeIn_a_valid = auto_in_a_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_opcode = auto_in_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_param = auto_in_a_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] nodeIn_a_bits_size = auto_in_a_bits_size_0; // @[Buffer.scala:40:9]
wire [6:0] nodeIn_a_bits_source = auto_in_a_bits_source_0; // @[Buffer.scala:40:9]
wire [28:0] nodeIn_a_bits_address = auto_in_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] nodeIn_a_bits_mask = auto_in_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] nodeIn_a_bits_data = auto_in_a_bits_data_0; // @[Buffer.scala:40:9]
wire nodeIn_a_bits_corrupt = auto_in_a_bits_corrupt_0; // @[Buffer.scala:40:9]
wire nodeIn_d_ready = auto_in_d_ready_0; // @[Buffer.scala:40:9]
wire nodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] nodeIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [6:0] nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] nodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire nodeOut_a_ready = auto_out_a_ready_0; // @[Buffer.scala:40:9]
wire nodeOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [6:0] nodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [28:0] nodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire nodeOut_d_ready; // @[MixedNode.scala:542:17]
wire nodeOut_d_valid = auto_out_d_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeOut_d_bits_opcode = auto_out_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [1:0] nodeOut_d_bits_param = auto_out_d_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] nodeOut_d_bits_size = auto_out_d_bits_size_0; // @[Buffer.scala:40:9]
wire [6:0] nodeOut_d_bits_source = auto_out_d_bits_source_0; // @[Buffer.scala:40:9]
wire nodeOut_d_bits_sink = auto_out_d_bits_sink_0; // @[Buffer.scala:40:9]
wire nodeOut_d_bits_denied = auto_out_d_bits_denied_0; // @[Buffer.scala:40:9]
wire [63:0] nodeOut_d_bits_data = auto_out_d_bits_data_0; // @[Buffer.scala:40:9]
wire nodeOut_d_bits_corrupt = auto_out_d_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_in_a_ready_0; // @[Buffer.scala:40:9]
wire [2:0] auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [1:0] auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
wire [6:0] auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
wire [63:0] auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_in_d_valid_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
wire [6:0] auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
wire [28:0] auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
wire auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_out_a_valid_0; // @[Buffer.scala:40:9]
wire auto_out_d_ready_0; // @[Buffer.scala:40:9]
assign auto_in_a_ready_0 = nodeIn_a_ready; // @[Buffer.scala:40:9]
assign auto_in_d_valid_0 = nodeIn_d_valid; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode_0 = nodeIn_d_bits_opcode; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param_0 = nodeIn_d_bits_param; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size_0 = nodeIn_d_bits_size; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source_0 = nodeIn_d_bits_source; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink_0 = nodeIn_d_bits_sink; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied_0 = nodeIn_d_bits_denied; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data_0 = nodeIn_d_bits_data; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt_0 = nodeIn_d_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_a_valid_0 = nodeOut_a_valid; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode_0 = nodeOut_a_bits_opcode; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param_0 = nodeOut_a_bits_param; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size_0 = nodeOut_a_bits_size; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source_0 = nodeOut_a_bits_source; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address_0 = nodeOut_a_bits_address; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask_0 = nodeOut_a_bits_mask; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data_0 = nodeOut_a_bits_data; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt_0 = nodeOut_a_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_d_ready_0 = nodeOut_d_ready; // @[Buffer.scala:40:9]
TLMonitor_22 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (nodeIn_a_ready), // @[MixedNode.scala:551:17]
.io_in_a_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_in_a_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_a_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_in_a_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_in_a_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_in_a_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_in_a_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_in_a_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_in_a_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_d_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_in_d_valid (nodeIn_d_valid), // @[MixedNode.scala:551:17]
.io_in_d_bits_opcode (nodeIn_d_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_d_bits_param (nodeIn_d_bits_param), // @[MixedNode.scala:551:17]
.io_in_d_bits_size (nodeIn_d_bits_size), // @[MixedNode.scala:551:17]
.io_in_d_bits_source (nodeIn_d_bits_source), // @[MixedNode.scala:551:17]
.io_in_d_bits_sink (nodeIn_d_bits_sink), // @[MixedNode.scala:551:17]
.io_in_d_bits_denied (nodeIn_d_bits_denied), // @[MixedNode.scala:551:17]
.io_in_d_bits_data (nodeIn_d_bits_data), // @[MixedNode.scala:551:17]
.io_in_d_bits_corrupt (nodeIn_d_bits_corrupt) // @[MixedNode.scala:551:17]
); // @[Nodes.scala:27:25]
Queue2_TLBundleA_a29d64s7k1z4u nodeOut_a_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeIn_a_ready),
.io_enq_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_enq_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_enq_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_enq_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_enq_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_enq_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_enq_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_enq_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_enq_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_deq_ready (nodeOut_a_ready), // @[MixedNode.scala:542:17]
.io_deq_valid (nodeOut_a_valid),
.io_deq_bits_opcode (nodeOut_a_bits_opcode),
.io_deq_bits_param (nodeOut_a_bits_param),
.io_deq_bits_size (nodeOut_a_bits_size),
.io_deq_bits_source (nodeOut_a_bits_source),
.io_deq_bits_address (nodeOut_a_bits_address),
.io_deq_bits_mask (nodeOut_a_bits_mask),
.io_deq_bits_data (nodeOut_a_bits_data),
.io_deq_bits_corrupt (nodeOut_a_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue2_TLBundleD_a29d64s7k1z4u nodeIn_d_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeOut_d_ready),
.io_enq_valid (nodeOut_d_valid), // @[MixedNode.scala:542:17]
.io_enq_bits_opcode (nodeOut_d_bits_opcode), // @[MixedNode.scala:542:17]
.io_enq_bits_param (nodeOut_d_bits_param), // @[MixedNode.scala:542:17]
.io_enq_bits_size (nodeOut_d_bits_size), // @[MixedNode.scala:542:17]
.io_enq_bits_source (nodeOut_d_bits_source), // @[MixedNode.scala:542:17]
.io_enq_bits_sink (nodeOut_d_bits_sink), // @[MixedNode.scala:542:17]
.io_enq_bits_denied (nodeOut_d_bits_denied), // @[MixedNode.scala:542:17]
.io_enq_bits_data (nodeOut_d_bits_data), // @[MixedNode.scala:542:17]
.io_enq_bits_corrupt (nodeOut_d_bits_corrupt), // @[MixedNode.scala:542:17]
.io_deq_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_deq_valid (nodeIn_d_valid),
.io_deq_bits_opcode (nodeIn_d_bits_opcode),
.io_deq_bits_param (nodeIn_d_bits_param),
.io_deq_bits_size (nodeIn_d_bits_size),
.io_deq_bits_source (nodeIn_d_bits_source),
.io_deq_bits_sink (nodeIn_d_bits_sink),
.io_deq_bits_denied (nodeIn_d_bits_denied),
.io_deq_bits_data (nodeIn_d_bits_data),
.io_deq_bits_corrupt (nodeIn_d_bits_corrupt)
); // @[Decoupled.scala:362:21]
assign auto_in_a_ready = auto_in_a_ready_0; // @[Buffer.scala:40:9]
assign auto_in_d_valid = auto_in_d_valid_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode = auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param = auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size = auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source = auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink = auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied = auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data = auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt = auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_a_valid = auto_out_a_valid_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode = auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param = auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size = auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source = auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address = auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask = auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data = auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt = auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_d_ready = auto_out_d_ready_0; // @[Buffer.scala:40:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Tile.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
import Util._
/**
  * A Tile is a purely combinational 2D array of passThrough PEs.
  * a, b, d, and control are broadcast across the entire array and are passed through to the Tile's outputs.
  * @param inputType The data type of each PE's inputs
  * @param rows Number of rows of PEs in the Tile
  * @param columns Number of columns of PEs in the Tile
*/
class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module {
val io = IO(new Bundle {
val in_a = Input(Vec(rows, inputType))
val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it
val in_d = Input(Vec(columns, outputType))
val in_control = Input(Vec(columns, new PEControl(accType)))
val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val in_last = Input(Vec(columns, Bool()))
val out_a = Output(Vec(rows, inputType))
val out_c = Output(Vec(columns, outputType))
val out_b = Output(Vec(columns, outputType))
val out_control = Output(Vec(columns, new PEControl(accType)))
val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val out_last = Output(Vec(columns, Bool()))
val in_valid = Input(Vec(columns, Bool()))
val out_valid = Output(Vec(columns, Bool()))
val bad_dataflow = Output(Bool())
})
import ev._
val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls)))
val tileT = tile.transpose
// TODO: abstract hori/vert broadcast, all these connections look the same
// Broadcast 'a' horizontally across the Tile
for (r <- 0 until rows) {
tile(r).foldLeft(io.in_a(r)) {
case (in_a, pe) =>
pe.io.in_a := in_a
pe.io.out_a
}
}
// Broadcast 'b' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_b(c)) {
case (in_b, pe) =>
pe.io.in_b := (if (tree_reduction) in_b.zero else in_b)
pe.io.out_b
}
}
// Broadcast 'd' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_d(c)) {
case (in_d, pe) =>
pe.io.in_d := in_d
pe.io.out_c
}
}
// Broadcast 'control' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_control(c)) {
case (in_ctrl, pe) =>
pe.io.in_control := in_ctrl
pe.io.out_control
}
}
  // Broadcast 'valid' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_valid(c)) {
case (v, pe) =>
pe.io.in_valid := v
pe.io.out_valid
}
}
// Broadcast 'id' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_id(c)) {
case (id, pe) =>
pe.io.in_id := id
pe.io.out_id
}
}
// Broadcast 'last' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_last(c)) {
case (last, pe) =>
pe.io.in_last := last
pe.io.out_last
}
}
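  // A sketch of how the repeated foldLeft broadcasts above could be factored into one
  // helper, per the TODO; `chain` is an assumed name, not defined in gemmini.
  //
  //   def chain[A](first: A, pes: Seq[PE[T]])(in: PE[T] => A, out: PE[T] => A)(connect: (A, A) => Unit): Unit =
  //     pes.foldLeft(first) { case (sig, pe) => connect(in(pe), sig); out(pe) }
  //
  //   // e.g. the horizontal 'a' broadcast would then read:
  //   // for (r <- 0 until rows) chain(io.in_a(r), tile(r))(_.io.in_a, _.io.out_a)(_ := _)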
// Drive the Tile's bottom IO
for (c <- 0 until columns) {
io.out_c(c) := tile(rows-1)(c).io.out_c
io.out_control(c) := tile(rows-1)(c).io.out_control
io.out_id(c) := tile(rows-1)(c).io.out_id
io.out_last(c) := tile(rows-1)(c).io.out_last
io.out_valid(c) := tile(rows-1)(c).io.out_valid
io.out_b(c) := {
if (tree_reduction) {
val prods = tileT(c).map(_.io.out_b)
accumulateTree(prods :+ io.in_b(c))
} else {
tile(rows - 1)(c).io.out_b
}
}
}
io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_)
// Drive the Tile's right IO
for (r <- 0 until rows) {
io.out_a(r) := tile(r)(columns-1).io.out_a
}
}
| module Tile_115( // @[Tile.scala:16:7]
input clock, // @[Tile.scala:16:7]
input reset, // @[Tile.scala:16:7]
input [7:0] io_in_a_0, // @[Tile.scala:17:14]
input [19:0] io_in_b_0, // @[Tile.scala:17:14]
input [19:0] io_in_d_0, // @[Tile.scala:17:14]
input io_in_control_0_dataflow, // @[Tile.scala:17:14]
input io_in_control_0_propagate, // @[Tile.scala:17:14]
input [4:0] io_in_control_0_shift, // @[Tile.scala:17:14]
input [2:0] io_in_id_0, // @[Tile.scala:17:14]
input io_in_last_0, // @[Tile.scala:17:14]
output [7:0] io_out_a_0, // @[Tile.scala:17:14]
output [19:0] io_out_c_0, // @[Tile.scala:17:14]
output [19:0] io_out_b_0, // @[Tile.scala:17:14]
output io_out_control_0_dataflow, // @[Tile.scala:17:14]
output io_out_control_0_propagate, // @[Tile.scala:17:14]
output [4:0] io_out_control_0_shift, // @[Tile.scala:17:14]
output [2:0] io_out_id_0, // @[Tile.scala:17:14]
output io_out_last_0, // @[Tile.scala:17:14]
input io_in_valid_0, // @[Tile.scala:17:14]
output io_out_valid_0 // @[Tile.scala:17:14]
);
wire [7:0] io_in_a_0_0 = io_in_a_0; // @[Tile.scala:16:7]
wire [19:0] io_in_b_0_0 = io_in_b_0; // @[Tile.scala:16:7]
wire [19:0] io_in_d_0_0 = io_in_d_0; // @[Tile.scala:16:7]
wire io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7]
wire io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7]
wire [4:0] io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7]
wire [2:0] io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7]
wire io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7]
wire io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7]
wire io_bad_dataflow = 1'h0; // @[Tile.scala:16:7, :17:14, :42:44]
wire [7:0] io_out_a_0_0; // @[Tile.scala:16:7]
wire [19:0] io_out_c_0_0; // @[Tile.scala:16:7]
wire [19:0] io_out_b_0_0; // @[Tile.scala:16:7]
wire io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
wire io_out_control_0_propagate_0; // @[Tile.scala:16:7]
wire [4:0] io_out_control_0_shift_0; // @[Tile.scala:16:7]
wire [2:0] io_out_id_0_0; // @[Tile.scala:16:7]
wire io_out_last_0_0; // @[Tile.scala:16:7]
wire io_out_valid_0_0; // @[Tile.scala:16:7]
PE_371 tile_0_0 ( // @[Tile.scala:42:44]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0_0), // @[Tile.scala:16:7]
.io_in_b (io_in_b_0_0), // @[Tile.scala:16:7]
.io_in_d (io_in_d_0_0), // @[Tile.scala:16:7]
.io_out_a (io_out_a_0_0),
.io_out_b (io_out_b_0_0),
.io_out_c (io_out_c_0_0),
.io_in_control_dataflow (io_in_control_0_dataflow_0), // @[Tile.scala:16:7]
.io_in_control_propagate (io_in_control_0_propagate_0), // @[Tile.scala:16:7]
.io_in_control_shift (io_in_control_0_shift_0), // @[Tile.scala:16:7]
.io_out_control_dataflow (io_out_control_0_dataflow_0),
.io_out_control_propagate (io_out_control_0_propagate_0),
.io_out_control_shift (io_out_control_0_shift_0),
.io_in_id (io_in_id_0_0), // @[Tile.scala:16:7]
.io_out_id (io_out_id_0_0),
.io_in_last (io_in_last_0_0), // @[Tile.scala:16:7]
.io_out_last (io_out_last_0_0),
.io_in_valid (io_in_valid_0_0), // @[Tile.scala:16:7]
.io_out_valid (io_out_valid_0_0)
); // @[Tile.scala:42:44]
assign io_out_a_0 = io_out_a_0_0; // @[Tile.scala:16:7]
assign io_out_c_0 = io_out_c_0_0; // @[Tile.scala:16:7]
assign io_out_b_0 = io_out_b_0_0; // @[Tile.scala:16:7]
assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7]
assign io_out_control_0_shift = io_out_control_0_shift_0; // @[Tile.scala:16:7]
assign io_out_id_0 = io_out_id_0_0; // @[Tile.scala:16:7]
assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7]
assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ClockGroup.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.prci
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.resources.FixedClockResource
case class ClockGroupingNode(groupName: String)(implicit valName: ValName)
extends MixedNexusNode(ClockGroupImp, ClockImp)(
dFn = { _ => ClockSourceParameters() },
uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq) })
{
override def circuitIdentity = outputs.size == 1
}
class ClockGroup(groupName: String)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupingNode(groupName)
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in(0)
val (out, _) = node.out.unzip
require (node.in.size == 1)
require (in.member.size == out.size)
(in.member.data zip out) foreach { case (i, o) => o := i }
}
}
object ClockGroup
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroup(valName.name)).node
}
case class ClockGroupAggregateNode(groupName: String)(implicit valName: ValName)
extends NexusNode(ClockGroupImp)(
dFn = { _ => ClockGroupSourceParameters() },
uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq.flatMap(_.members))})
{
override def circuitIdentity = outputs.size == 1
}
class ClockGroupAggregator(groupName: String)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupAggregateNode(groupName)
override lazy val desiredName = s"ClockGroupAggregator_$groupName"
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in.unzip
val (out, _) = node.out.unzip
val outputs = out.flatMap(_.member.data)
require (node.in.size == 1, s"Aggregator for groupName: ${groupName} had ${node.in.size} inward edges instead of 1")
require (in.head.member.size == outputs.size)
in.head.member.data.zip(outputs).foreach { case (i, o) => o := i }
}
}
object ClockGroupAggregator
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroupAggregator(valName.name)).node
}
class SimpleClockGroupSource(numSources: Int = 1)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupSourceNode(List.fill(numSources) { ClockGroupSourceParameters() })
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
val (out, _) = node.out.unzip
out.map { out: ClockGroupBundle =>
out.member.data.foreach { o =>
o.clock := clock; o.reset := reset }
}
}
}
object SimpleClockGroupSource
{
def apply(num: Int = 1)(implicit p: Parameters, valName: ValName) = LazyModule(new SimpleClockGroupSource(num)).node
}
case class FixedClockBroadcastNode(fixedClockOpt: Option[ClockParameters])(implicit valName: ValName)
extends NexusNode(ClockImp)(
dFn = { seq => fixedClockOpt.map(_ => ClockSourceParameters(give = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSourceParameters()) },
uFn = { seq => fixedClockOpt.map(_ => ClockSinkParameters(take = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSinkParameters()) },
inputRequiresOutput = false) {
def fixedClockResources(name: String, prefix: String = "soc/"): Seq[Option[FixedClockResource]] = Seq(fixedClockOpt.map(t => new FixedClockResource(name, t.freqMHz, prefix)))
}
class FixedClockBroadcast(fixedClockOpt: Option[ClockParameters])(implicit p: Parameters) extends LazyModule
{
val node = new FixedClockBroadcastNode(fixedClockOpt) {
override def circuitIdentity = outputs.size == 1
}
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in(0)
val (out, _) = node.out.unzip
override def desiredName = s"FixedClockBroadcast_${out.size}"
require (node.in.size == 1, "FixedClockBroadcast can only broadcast a single clock")
out.foreach { _ := in }
}
}
object FixedClockBroadcast
{
def apply(fixedClockOpt: Option[ClockParameters] = None)(implicit p: Parameters, valName: ValName) = LazyModule(new FixedClockBroadcast(fixedClockOpt)).node
}
case class PRCIClockGroupNode()(implicit valName: ValName)
extends NexusNode(ClockGroupImp)(
dFn = { _ => ClockGroupSourceParameters() },
uFn = { _ => ClockGroupSinkParameters("prci", Nil) },
outputRequiresInput = false)
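A minimal sketch of how the nodes above are typically composed, assuming a project with these prci, cde, and diplomacy packages on the classpath. The ExampleClockTree name is hypothetical, and a real design would also bind ClockImp consumers (e.g. clock sink nodes) to clockGroup before elaboration, since nexus nodes with inputs but no outputs will not pass their checks.

import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
import freechips.rocketchip.prci._

class ExampleClockTree(implicit p: Parameters) extends LazyModule {
  // Drives every group member from the implicit clock/reset (see SimpleClockGroupSource above).
  val clockSource = SimpleClockGroupSource()
  // The aggregator fans a single inward group out to downstream groups; ClockGroup
  // then splits the group's members into individual clock/reset edges.
  val aggregator  = ClockGroupAggregator()
  val clockGroup  = ClockGroup()
  clockGroup := aggregator := clockSource
  lazy val module = new LazyModuleImp(this) { }
}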
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and the unconnected [[Dangle]]s from this module
    * and its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
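A minimal sketch of the childClock/childReset pattern described above: a hypothetical RawIsland module (not from the sources here) that receives an external clock/reset pair and forwards it to its lazily instantiated children.

import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyRawModuleImp}

class RawIsland(implicit p: Parameters) extends LazyModule {
  lazy val module = new LazyRawModuleImp(this) {
    // Elaborate lazy children under childClock/childReset rather than the disabled defaults.
    override def provideImplicitClockToLazyChildren = true
    val io = IO(new Bundle {
      val clock = Input(Clock())
      val reset = Input(Bool())
    })
    childClock := io.clock
    childReset := io.reset
  }
}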
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
 * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
 * Downward-flowing parameters received on the inner side of the node. It is usually a set of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
 * Upward-flowing parameters generated by the inner side of the node. It is usually a set of parameters describing
 * the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
 * Edge Parameters describing a connection on the inner side of the node. It is usually a set of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 * It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
 * Downward-flowing parameters generated on the outer side of the node. It is usually a set of parameters
 * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
 * Upward-flowing parameters received by the outer side of the node. It is usually a set of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
 * Edge Parameters describing a connection on the outer side of the node. It is usually a set of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
 * interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
 * - line `─`: the source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
 /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
 * connections which need to be resolved to determine how many actual edges they correspond to. We also
 * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
* edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
 // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
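A minimal sketch of the in/out accessors documented above, assuming rocket-chip's TileLink node implementation (TLAdapterNode is not defined in the sources here, and the PassThrough name is hypothetical). Real adapters usually connect channel by channel; the bulk <> below is only meant to show where node.in and node.out become usable.

import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink.TLAdapterNode

class PassThrough(implicit p: Parameters) extends LazyModule {
  val node = TLAdapterNode()
  lazy val module = new LazyModuleImp(this) {
    // node.in / node.out may only be read once elaboration of this Imp has begun;
    // each element pairs a negotiated hardware bundle with its edge parameters.
    (node.in zip node.out).foreach { case ((in, _), (out, _)) => out <> in }
  }
}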
| module ClockGroupAggregator_allClocks( // @[ClockGroup.scala:53:9]
input auto_in_member_allClocks_clockTapNode_clock_tap_clock, // @[LazyModuleImp.scala:107:25]
input auto_in_member_allClocks_clockTapNode_clock_tap_reset, // @[LazyModuleImp.scala:107:25]
input auto_in_member_allClocks_cbus_0_clock, // @[LazyModuleImp.scala:107:25]
input auto_in_member_allClocks_cbus_0_reset, // @[LazyModuleImp.scala:107:25]
input auto_in_member_allClocks_mbus_0_clock, // @[LazyModuleImp.scala:107:25]
input auto_in_member_allClocks_mbus_0_reset, // @[LazyModuleImp.scala:107:25]
input auto_in_member_allClocks_fbus_0_clock, // @[LazyModuleImp.scala:107:25]
input auto_in_member_allClocks_fbus_0_reset, // @[LazyModuleImp.scala:107:25]
input auto_in_member_allClocks_pbus_0_clock, // @[LazyModuleImp.scala:107:25]
input auto_in_member_allClocks_pbus_0_reset, // @[LazyModuleImp.scala:107:25]
input auto_in_member_allClocks_ccbus1_0_clock, // @[LazyModuleImp.scala:107:25]
input auto_in_member_allClocks_ccbus1_0_reset, // @[LazyModuleImp.scala:107:25]
input auto_in_member_allClocks_csbus1_0_clock, // @[LazyModuleImp.scala:107:25]
input auto_in_member_allClocks_csbus1_0_reset, // @[LazyModuleImp.scala:107:25]
input auto_in_member_allClocks_ccbus0_0_clock, // @[LazyModuleImp.scala:107:25]
input auto_in_member_allClocks_ccbus0_0_reset, // @[LazyModuleImp.scala:107:25]
input auto_in_member_allClocks_csbus0_0_clock, // @[LazyModuleImp.scala:107:25]
input auto_in_member_allClocks_csbus0_0_reset, // @[LazyModuleImp.scala:107:25]
input auto_in_member_allClocks_sbus_1_clock, // @[LazyModuleImp.scala:107:25]
input auto_in_member_allClocks_sbus_1_reset, // @[LazyModuleImp.scala:107:25]
input auto_in_member_allClocks_sbus_0_clock, // @[LazyModuleImp.scala:107:25]
input auto_in_member_allClocks_sbus_0_reset, // @[LazyModuleImp.scala:107:25]
output auto_out_9_member_clockTapNode_clockTapNode_clock_tap_clock, // @[LazyModuleImp.scala:107:25]
output auto_out_9_member_clockTapNode_clockTapNode_clock_tap_reset, // @[LazyModuleImp.scala:107:25]
output auto_out_8_member_cbus_cbus_0_clock, // @[LazyModuleImp.scala:107:25]
output auto_out_8_member_cbus_cbus_0_reset, // @[LazyModuleImp.scala:107:25]
output auto_out_7_member_mbus_mbus_0_clock, // @[LazyModuleImp.scala:107:25]
output auto_out_7_member_mbus_mbus_0_reset, // @[LazyModuleImp.scala:107:25]
output auto_out_6_member_fbus_fbus_0_clock, // @[LazyModuleImp.scala:107:25]
output auto_out_6_member_fbus_fbus_0_reset, // @[LazyModuleImp.scala:107:25]
output auto_out_5_member_pbus_pbus_0_clock, // @[LazyModuleImp.scala:107:25]
output auto_out_5_member_pbus_pbus_0_reset, // @[LazyModuleImp.scala:107:25]
output auto_out_4_member_ccbus1_ccbus1_0_clock, // @[LazyModuleImp.scala:107:25]
output auto_out_4_member_ccbus1_ccbus1_0_reset, // @[LazyModuleImp.scala:107:25]
output auto_out_3_member_csbus1_csbus1_0_clock, // @[LazyModuleImp.scala:107:25]
output auto_out_3_member_csbus1_csbus1_0_reset, // @[LazyModuleImp.scala:107:25]
output auto_out_2_member_ccbus0_ccbus0_0_clock, // @[LazyModuleImp.scala:107:25]
output auto_out_2_member_ccbus0_ccbus0_0_reset, // @[LazyModuleImp.scala:107:25]
output auto_out_1_member_csbus0_csbus0_0_clock, // @[LazyModuleImp.scala:107:25]
output auto_out_1_member_csbus0_csbus0_0_reset, // @[LazyModuleImp.scala:107:25]
output auto_out_0_member_sbus_sbus_1_clock, // @[LazyModuleImp.scala:107:25]
output auto_out_0_member_sbus_sbus_1_reset, // @[LazyModuleImp.scala:107:25]
output auto_out_0_member_sbus_sbus_0_clock, // @[LazyModuleImp.scala:107:25]
output auto_out_0_member_sbus_sbus_0_reset // @[LazyModuleImp.scala:107:25]
);
wire auto_in_member_allClocks_clockTapNode_clock_tap_clock_0 = auto_in_member_allClocks_clockTapNode_clock_tap_clock; // @[ClockGroup.scala:53:9]
wire auto_in_member_allClocks_clockTapNode_clock_tap_reset_0 = auto_in_member_allClocks_clockTapNode_clock_tap_reset; // @[ClockGroup.scala:53:9]
wire auto_in_member_allClocks_cbus_0_clock_0 = auto_in_member_allClocks_cbus_0_clock; // @[ClockGroup.scala:53:9]
wire auto_in_member_allClocks_cbus_0_reset_0 = auto_in_member_allClocks_cbus_0_reset; // @[ClockGroup.scala:53:9]
wire auto_in_member_allClocks_mbus_0_clock_0 = auto_in_member_allClocks_mbus_0_clock; // @[ClockGroup.scala:53:9]
wire auto_in_member_allClocks_mbus_0_reset_0 = auto_in_member_allClocks_mbus_0_reset; // @[ClockGroup.scala:53:9]
wire auto_in_member_allClocks_fbus_0_clock_0 = auto_in_member_allClocks_fbus_0_clock; // @[ClockGroup.scala:53:9]
wire auto_in_member_allClocks_fbus_0_reset_0 = auto_in_member_allClocks_fbus_0_reset; // @[ClockGroup.scala:53:9]
wire auto_in_member_allClocks_pbus_0_clock_0 = auto_in_member_allClocks_pbus_0_clock; // @[ClockGroup.scala:53:9]
wire auto_in_member_allClocks_pbus_0_reset_0 = auto_in_member_allClocks_pbus_0_reset; // @[ClockGroup.scala:53:9]
wire auto_in_member_allClocks_ccbus1_0_clock_0 = auto_in_member_allClocks_ccbus1_0_clock; // @[ClockGroup.scala:53:9]
wire auto_in_member_allClocks_ccbus1_0_reset_0 = auto_in_member_allClocks_ccbus1_0_reset; // @[ClockGroup.scala:53:9]
wire auto_in_member_allClocks_csbus1_0_clock_0 = auto_in_member_allClocks_csbus1_0_clock; // @[ClockGroup.scala:53:9]
wire auto_in_member_allClocks_csbus1_0_reset_0 = auto_in_member_allClocks_csbus1_0_reset; // @[ClockGroup.scala:53:9]
wire auto_in_member_allClocks_ccbus0_0_clock_0 = auto_in_member_allClocks_ccbus0_0_clock; // @[ClockGroup.scala:53:9]
wire auto_in_member_allClocks_ccbus0_0_reset_0 = auto_in_member_allClocks_ccbus0_0_reset; // @[ClockGroup.scala:53:9]
wire auto_in_member_allClocks_csbus0_0_clock_0 = auto_in_member_allClocks_csbus0_0_clock; // @[ClockGroup.scala:53:9]
wire auto_in_member_allClocks_csbus0_0_reset_0 = auto_in_member_allClocks_csbus0_0_reset; // @[ClockGroup.scala:53:9]
wire auto_in_member_allClocks_sbus_1_clock_0 = auto_in_member_allClocks_sbus_1_clock; // @[ClockGroup.scala:53:9]
wire auto_in_member_allClocks_sbus_1_reset_0 = auto_in_member_allClocks_sbus_1_reset; // @[ClockGroup.scala:53:9]
wire auto_in_member_allClocks_sbus_0_clock_0 = auto_in_member_allClocks_sbus_0_clock; // @[ClockGroup.scala:53:9]
wire auto_in_member_allClocks_sbus_0_reset_0 = auto_in_member_allClocks_sbus_0_reset; // @[ClockGroup.scala:53:9]
wire childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire nodeIn_member_allClocks_clockTapNode_clock_tap_clock = auto_in_member_allClocks_clockTapNode_clock_tap_clock_0; // @[ClockGroup.scala:53:9]
wire nodeIn_member_allClocks_clockTapNode_clock_tap_reset = auto_in_member_allClocks_clockTapNode_clock_tap_reset_0; // @[ClockGroup.scala:53:9]
wire nodeIn_member_allClocks_cbus_0_clock = auto_in_member_allClocks_cbus_0_clock_0; // @[ClockGroup.scala:53:9]
wire nodeIn_member_allClocks_cbus_0_reset = auto_in_member_allClocks_cbus_0_reset_0; // @[ClockGroup.scala:53:9]
wire nodeIn_member_allClocks_mbus_0_clock = auto_in_member_allClocks_mbus_0_clock_0; // @[ClockGroup.scala:53:9]
wire nodeIn_member_allClocks_mbus_0_reset = auto_in_member_allClocks_mbus_0_reset_0; // @[ClockGroup.scala:53:9]
wire nodeIn_member_allClocks_fbus_0_clock = auto_in_member_allClocks_fbus_0_clock_0; // @[ClockGroup.scala:53:9]
wire nodeIn_member_allClocks_fbus_0_reset = auto_in_member_allClocks_fbus_0_reset_0; // @[ClockGroup.scala:53:9]
wire nodeIn_member_allClocks_pbus_0_clock = auto_in_member_allClocks_pbus_0_clock_0; // @[ClockGroup.scala:53:9]
wire nodeIn_member_allClocks_pbus_0_reset = auto_in_member_allClocks_pbus_0_reset_0; // @[ClockGroup.scala:53:9]
wire nodeIn_member_allClocks_ccbus1_0_clock = auto_in_member_allClocks_ccbus1_0_clock_0; // @[ClockGroup.scala:53:9]
wire nodeIn_member_allClocks_ccbus1_0_reset = auto_in_member_allClocks_ccbus1_0_reset_0; // @[ClockGroup.scala:53:9]
wire nodeIn_member_allClocks_csbus1_0_clock = auto_in_member_allClocks_csbus1_0_clock_0; // @[ClockGroup.scala:53:9]
wire nodeIn_member_allClocks_csbus1_0_reset = auto_in_member_allClocks_csbus1_0_reset_0; // @[ClockGroup.scala:53:9]
wire nodeIn_member_allClocks_ccbus0_0_clock = auto_in_member_allClocks_ccbus0_0_clock_0; // @[ClockGroup.scala:53:9]
wire nodeIn_member_allClocks_ccbus0_0_reset = auto_in_member_allClocks_ccbus0_0_reset_0; // @[ClockGroup.scala:53:9]
wire nodeIn_member_allClocks_csbus0_0_clock = auto_in_member_allClocks_csbus0_0_clock_0; // @[ClockGroup.scala:53:9]
wire nodeIn_member_allClocks_csbus0_0_reset = auto_in_member_allClocks_csbus0_0_reset_0; // @[ClockGroup.scala:53:9]
wire nodeIn_member_allClocks_sbus_1_clock = auto_in_member_allClocks_sbus_1_clock_0; // @[ClockGroup.scala:53:9]
wire nodeIn_member_allClocks_sbus_1_reset = auto_in_member_allClocks_sbus_1_reset_0; // @[ClockGroup.scala:53:9]
wire nodeIn_member_allClocks_sbus_0_clock = auto_in_member_allClocks_sbus_0_clock_0; // @[ClockGroup.scala:53:9]
wire nodeIn_member_allClocks_sbus_0_reset = auto_in_member_allClocks_sbus_0_reset_0; // @[ClockGroup.scala:53:9]
wire x1_nodeOut_8_member_clockTapNode_clockTapNode_clock_tap_clock; // @[MixedNode.scala:542:17]
wire x1_nodeOut_8_member_clockTapNode_clockTapNode_clock_tap_reset; // @[MixedNode.scala:542:17]
wire x1_nodeOut_7_member_cbus_cbus_0_clock; // @[MixedNode.scala:542:17]
wire x1_nodeOut_7_member_cbus_cbus_0_reset; // @[MixedNode.scala:542:17]
wire x1_nodeOut_6_member_mbus_mbus_0_clock; // @[MixedNode.scala:542:17]
wire x1_nodeOut_6_member_mbus_mbus_0_reset; // @[MixedNode.scala:542:17]
wire x1_nodeOut_5_member_fbus_fbus_0_clock; // @[MixedNode.scala:542:17]
wire x1_nodeOut_5_member_fbus_fbus_0_reset; // @[MixedNode.scala:542:17]
wire x1_nodeOut_4_member_pbus_pbus_0_clock; // @[MixedNode.scala:542:17]
wire x1_nodeOut_4_member_pbus_pbus_0_reset; // @[MixedNode.scala:542:17]
wire x1_nodeOut_3_member_ccbus1_ccbus1_0_clock; // @[MixedNode.scala:542:17]
wire x1_nodeOut_3_member_ccbus1_ccbus1_0_reset; // @[MixedNode.scala:542:17]
wire x1_nodeOut_2_member_csbus1_csbus1_0_clock; // @[MixedNode.scala:542:17]
wire x1_nodeOut_2_member_csbus1_csbus1_0_reset; // @[MixedNode.scala:542:17]
wire x1_nodeOut_1_member_ccbus0_ccbus0_0_clock; // @[MixedNode.scala:542:17]
wire x1_nodeOut_1_member_ccbus0_ccbus0_0_reset; // @[MixedNode.scala:542:17]
wire x1_nodeOut_member_csbus0_csbus0_0_clock; // @[MixedNode.scala:542:17]
wire x1_nodeOut_member_csbus0_csbus0_0_reset; // @[MixedNode.scala:542:17]
wire nodeOut_member_sbus_sbus_1_clock; // @[MixedNode.scala:542:17]
wire nodeOut_member_sbus_sbus_1_reset; // @[MixedNode.scala:542:17]
wire nodeOut_member_sbus_sbus_0_clock; // @[MixedNode.scala:542:17]
wire nodeOut_member_sbus_sbus_0_reset; // @[MixedNode.scala:542:17]
wire auto_out_9_member_clockTapNode_clockTapNode_clock_tap_clock_0; // @[ClockGroup.scala:53:9]
wire auto_out_9_member_clockTapNode_clockTapNode_clock_tap_reset_0; // @[ClockGroup.scala:53:9]
wire auto_out_8_member_cbus_cbus_0_clock_0; // @[ClockGroup.scala:53:9]
wire auto_out_8_member_cbus_cbus_0_reset_0; // @[ClockGroup.scala:53:9]
wire auto_out_7_member_mbus_mbus_0_clock_0; // @[ClockGroup.scala:53:9]
wire auto_out_7_member_mbus_mbus_0_reset_0; // @[ClockGroup.scala:53:9]
wire auto_out_6_member_fbus_fbus_0_clock_0; // @[ClockGroup.scala:53:9]
wire auto_out_6_member_fbus_fbus_0_reset_0; // @[ClockGroup.scala:53:9]
wire auto_out_5_member_pbus_pbus_0_clock_0; // @[ClockGroup.scala:53:9]
wire auto_out_5_member_pbus_pbus_0_reset_0; // @[ClockGroup.scala:53:9]
wire auto_out_4_member_ccbus1_ccbus1_0_clock_0; // @[ClockGroup.scala:53:9]
wire auto_out_4_member_ccbus1_ccbus1_0_reset_0; // @[ClockGroup.scala:53:9]
wire auto_out_3_member_csbus1_csbus1_0_clock_0; // @[ClockGroup.scala:53:9]
wire auto_out_3_member_csbus1_csbus1_0_reset_0; // @[ClockGroup.scala:53:9]
wire auto_out_2_member_ccbus0_ccbus0_0_clock_0; // @[ClockGroup.scala:53:9]
wire auto_out_2_member_ccbus0_ccbus0_0_reset_0; // @[ClockGroup.scala:53:9]
wire auto_out_1_member_csbus0_csbus0_0_clock_0; // @[ClockGroup.scala:53:9]
wire auto_out_1_member_csbus0_csbus0_0_reset_0; // @[ClockGroup.scala:53:9]
wire auto_out_0_member_sbus_sbus_1_clock_0; // @[ClockGroup.scala:53:9]
wire auto_out_0_member_sbus_sbus_1_reset_0; // @[ClockGroup.scala:53:9]
wire auto_out_0_member_sbus_sbus_0_clock_0; // @[ClockGroup.scala:53:9]
wire auto_out_0_member_sbus_sbus_0_reset_0; // @[ClockGroup.scala:53:9]
assign x1_nodeOut_8_member_clockTapNode_clockTapNode_clock_tap_clock = nodeIn_member_allClocks_clockTapNode_clock_tap_clock; // @[MixedNode.scala:542:17, :551:17]
assign x1_nodeOut_8_member_clockTapNode_clockTapNode_clock_tap_reset = nodeIn_member_allClocks_clockTapNode_clock_tap_reset; // @[MixedNode.scala:542:17, :551:17]
assign x1_nodeOut_7_member_cbus_cbus_0_clock = nodeIn_member_allClocks_cbus_0_clock; // @[MixedNode.scala:542:17, :551:17]
assign x1_nodeOut_7_member_cbus_cbus_0_reset = nodeIn_member_allClocks_cbus_0_reset; // @[MixedNode.scala:542:17, :551:17]
assign x1_nodeOut_6_member_mbus_mbus_0_clock = nodeIn_member_allClocks_mbus_0_clock; // @[MixedNode.scala:542:17, :551:17]
assign x1_nodeOut_6_member_mbus_mbus_0_reset = nodeIn_member_allClocks_mbus_0_reset; // @[MixedNode.scala:542:17, :551:17]
assign x1_nodeOut_5_member_fbus_fbus_0_clock = nodeIn_member_allClocks_fbus_0_clock; // @[MixedNode.scala:542:17, :551:17]
assign x1_nodeOut_5_member_fbus_fbus_0_reset = nodeIn_member_allClocks_fbus_0_reset; // @[MixedNode.scala:542:17, :551:17]
assign x1_nodeOut_4_member_pbus_pbus_0_clock = nodeIn_member_allClocks_pbus_0_clock; // @[MixedNode.scala:542:17, :551:17]
assign x1_nodeOut_4_member_pbus_pbus_0_reset = nodeIn_member_allClocks_pbus_0_reset; // @[MixedNode.scala:542:17, :551:17]
assign x1_nodeOut_3_member_ccbus1_ccbus1_0_clock = nodeIn_member_allClocks_ccbus1_0_clock; // @[MixedNode.scala:542:17, :551:17]
assign x1_nodeOut_3_member_ccbus1_ccbus1_0_reset = nodeIn_member_allClocks_ccbus1_0_reset; // @[MixedNode.scala:542:17, :551:17]
assign x1_nodeOut_2_member_csbus1_csbus1_0_clock = nodeIn_member_allClocks_csbus1_0_clock; // @[MixedNode.scala:542:17, :551:17]
assign x1_nodeOut_2_member_csbus1_csbus1_0_reset = nodeIn_member_allClocks_csbus1_0_reset; // @[MixedNode.scala:542:17, :551:17]
assign x1_nodeOut_1_member_ccbus0_ccbus0_0_clock = nodeIn_member_allClocks_ccbus0_0_clock; // @[MixedNode.scala:542:17, :551:17]
assign x1_nodeOut_1_member_ccbus0_ccbus0_0_reset = nodeIn_member_allClocks_ccbus0_0_reset; // @[MixedNode.scala:542:17, :551:17]
assign x1_nodeOut_member_csbus0_csbus0_0_clock = nodeIn_member_allClocks_csbus0_0_clock; // @[MixedNode.scala:542:17, :551:17]
assign x1_nodeOut_member_csbus0_csbus0_0_reset = nodeIn_member_allClocks_csbus0_0_reset; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_member_sbus_sbus_1_clock = nodeIn_member_allClocks_sbus_1_clock; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_member_sbus_sbus_1_reset = nodeIn_member_allClocks_sbus_1_reset; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_member_sbus_sbus_0_clock = nodeIn_member_allClocks_sbus_0_clock; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_member_sbus_sbus_0_reset = nodeIn_member_allClocks_sbus_0_reset; // @[MixedNode.scala:542:17, :551:17]
assign auto_out_0_member_sbus_sbus_1_clock_0 = nodeOut_member_sbus_sbus_1_clock; // @[ClockGroup.scala:53:9]
assign auto_out_0_member_sbus_sbus_1_reset_0 = nodeOut_member_sbus_sbus_1_reset; // @[ClockGroup.scala:53:9]
assign auto_out_0_member_sbus_sbus_0_clock_0 = nodeOut_member_sbus_sbus_0_clock; // @[ClockGroup.scala:53:9]
assign auto_out_0_member_sbus_sbus_0_reset_0 = nodeOut_member_sbus_sbus_0_reset; // @[ClockGroup.scala:53:9]
assign auto_out_1_member_csbus0_csbus0_0_clock_0 = x1_nodeOut_member_csbus0_csbus0_0_clock; // @[ClockGroup.scala:53:9]
assign auto_out_1_member_csbus0_csbus0_0_reset_0 = x1_nodeOut_member_csbus0_csbus0_0_reset; // @[ClockGroup.scala:53:9]
assign auto_out_2_member_ccbus0_ccbus0_0_clock_0 = x1_nodeOut_1_member_ccbus0_ccbus0_0_clock; // @[ClockGroup.scala:53:9]
assign auto_out_2_member_ccbus0_ccbus0_0_reset_0 = x1_nodeOut_1_member_ccbus0_ccbus0_0_reset; // @[ClockGroup.scala:53:9]
assign auto_out_3_member_csbus1_csbus1_0_clock_0 = x1_nodeOut_2_member_csbus1_csbus1_0_clock; // @[ClockGroup.scala:53:9]
assign auto_out_3_member_csbus1_csbus1_0_reset_0 = x1_nodeOut_2_member_csbus1_csbus1_0_reset; // @[ClockGroup.scala:53:9]
assign auto_out_4_member_ccbus1_ccbus1_0_clock_0 = x1_nodeOut_3_member_ccbus1_ccbus1_0_clock; // @[ClockGroup.scala:53:9]
assign auto_out_4_member_ccbus1_ccbus1_0_reset_0 = x1_nodeOut_3_member_ccbus1_ccbus1_0_reset; // @[ClockGroup.scala:53:9]
assign auto_out_5_member_pbus_pbus_0_clock_0 = x1_nodeOut_4_member_pbus_pbus_0_clock; // @[ClockGroup.scala:53:9]
assign auto_out_5_member_pbus_pbus_0_reset_0 = x1_nodeOut_4_member_pbus_pbus_0_reset; // @[ClockGroup.scala:53:9]
assign auto_out_6_member_fbus_fbus_0_clock_0 = x1_nodeOut_5_member_fbus_fbus_0_clock; // @[ClockGroup.scala:53:9]
assign auto_out_6_member_fbus_fbus_0_reset_0 = x1_nodeOut_5_member_fbus_fbus_0_reset; // @[ClockGroup.scala:53:9]
assign auto_out_7_member_mbus_mbus_0_clock_0 = x1_nodeOut_6_member_mbus_mbus_0_clock; // @[ClockGroup.scala:53:9]
assign auto_out_7_member_mbus_mbus_0_reset_0 = x1_nodeOut_6_member_mbus_mbus_0_reset; // @[ClockGroup.scala:53:9]
assign auto_out_8_member_cbus_cbus_0_clock_0 = x1_nodeOut_7_member_cbus_cbus_0_clock; // @[ClockGroup.scala:53:9]
assign auto_out_8_member_cbus_cbus_0_reset_0 = x1_nodeOut_7_member_cbus_cbus_0_reset; // @[ClockGroup.scala:53:9]
assign auto_out_9_member_clockTapNode_clockTapNode_clock_tap_clock_0 = x1_nodeOut_8_member_clockTapNode_clockTapNode_clock_tap_clock; // @[ClockGroup.scala:53:9]
assign auto_out_9_member_clockTapNode_clockTapNode_clock_tap_reset_0 = x1_nodeOut_8_member_clockTapNode_clockTapNode_clock_tap_reset; // @[ClockGroup.scala:53:9]
assign auto_out_9_member_clockTapNode_clockTapNode_clock_tap_clock = auto_out_9_member_clockTapNode_clockTapNode_clock_tap_clock_0; // @[ClockGroup.scala:53:9]
assign auto_out_9_member_clockTapNode_clockTapNode_clock_tap_reset = auto_out_9_member_clockTapNode_clockTapNode_clock_tap_reset_0; // @[ClockGroup.scala:53:9]
assign auto_out_8_member_cbus_cbus_0_clock = auto_out_8_member_cbus_cbus_0_clock_0; // @[ClockGroup.scala:53:9]
assign auto_out_8_member_cbus_cbus_0_reset = auto_out_8_member_cbus_cbus_0_reset_0; // @[ClockGroup.scala:53:9]
assign auto_out_7_member_mbus_mbus_0_clock = auto_out_7_member_mbus_mbus_0_clock_0; // @[ClockGroup.scala:53:9]
assign auto_out_7_member_mbus_mbus_0_reset = auto_out_7_member_mbus_mbus_0_reset_0; // @[ClockGroup.scala:53:9]
assign auto_out_6_member_fbus_fbus_0_clock = auto_out_6_member_fbus_fbus_0_clock_0; // @[ClockGroup.scala:53:9]
assign auto_out_6_member_fbus_fbus_0_reset = auto_out_6_member_fbus_fbus_0_reset_0; // @[ClockGroup.scala:53:9]
assign auto_out_5_member_pbus_pbus_0_clock = auto_out_5_member_pbus_pbus_0_clock_0; // @[ClockGroup.scala:53:9]
assign auto_out_5_member_pbus_pbus_0_reset = auto_out_5_member_pbus_pbus_0_reset_0; // @[ClockGroup.scala:53:9]
assign auto_out_4_member_ccbus1_ccbus1_0_clock = auto_out_4_member_ccbus1_ccbus1_0_clock_0; // @[ClockGroup.scala:53:9]
assign auto_out_4_member_ccbus1_ccbus1_0_reset = auto_out_4_member_ccbus1_ccbus1_0_reset_0; // @[ClockGroup.scala:53:9]
assign auto_out_3_member_csbus1_csbus1_0_clock = auto_out_3_member_csbus1_csbus1_0_clock_0; // @[ClockGroup.scala:53:9]
assign auto_out_3_member_csbus1_csbus1_0_reset = auto_out_3_member_csbus1_csbus1_0_reset_0; // @[ClockGroup.scala:53:9]
assign auto_out_2_member_ccbus0_ccbus0_0_clock = auto_out_2_member_ccbus0_ccbus0_0_clock_0; // @[ClockGroup.scala:53:9]
assign auto_out_2_member_ccbus0_ccbus0_0_reset = auto_out_2_member_ccbus0_ccbus0_0_reset_0; // @[ClockGroup.scala:53:9]
assign auto_out_1_member_csbus0_csbus0_0_clock = auto_out_1_member_csbus0_csbus0_0_clock_0; // @[ClockGroup.scala:53:9]
assign auto_out_1_member_csbus0_csbus0_0_reset = auto_out_1_member_csbus0_csbus0_0_reset_0; // @[ClockGroup.scala:53:9]
assign auto_out_0_member_sbus_sbus_1_clock = auto_out_0_member_sbus_sbus_1_clock_0; // @[ClockGroup.scala:53:9]
assign auto_out_0_member_sbus_sbus_1_reset = auto_out_0_member_sbus_sbus_1_reset_0; // @[ClockGroup.scala:53:9]
assign auto_out_0_member_sbus_sbus_0_clock = auto_out_0_member_sbus_sbus_0_clock_0; // @[ClockGroup.scala:53:9]
assign auto_out_0_member_sbus_sbus_0_reset = auto_out_0_member_sbus_sbus_0_reset_0; // @[ClockGroup.scala:53:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
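// Minimal usage sketch (illustrative; `StrobeDelay` and its port names are
// assumptions, not part of the upstream file): delays a 1-bit strobe by three
// cycles, with each stage reset to false.B and suggested names dly_0..dly_2.
class StrobeDelay extends Module {
  val io = IO(new Bundle {
    val in  = Input(Bool())
    val out = Output(Bool())
  })
  io.out := ShiftRegInit(io.in, n = 3, init = false.B, name = Some("dly"))
}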
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
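// Minimal usage sketch (illustrative; `CdcPipe` and its port names are assumed,
// not part of the upstream file): a two-deep, asynchronously reset pipeline over
// an 8-bit bus. The generated AsyncResetShiftReg_w8_d2_i0 instance is
// suggest-named "sync"; its internal stages keep the default "pipe_*" names.
class CdcPipe extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(8.W))
    val q = Output(UInt(8.W))
  })
  io.q := AsyncResetShiftReg(io.d, depth = 2, init = 0, name = Some("sync"))
}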
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
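// Illustrative parameter choices (the concrete values below are assumptions for
// the example, not requirements of any particular design):
//   AsyncQueueParams()                         // depth=8, sync=3 -> bits=3, wires=8
//   AsyncQueueParams(depth = 4, narrow = true) // bits=2, wires=1 (read mux moved to the source side)
//   AsyncQueueParams.singleton()               // depth=1 -> bits=0, wires=1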
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
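// Worked example (for illustration): with bits = 3 the binary count 0,1,2,3,4,5,...
// maps through bin ^ (bin >> 1) to the Gray sequence 000,001,011,010,110,111,...,
// so consecutive counter values differ in exactly one bit -- the property that
// lets the widx/ridx pointers below be synchronized bit-by-bit across clock
// domains without ever observing a torn value.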
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
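// Usage sketch (illustrative, mirroring how AsyncQueueSource/AsyncQueueSink drive
// this RawModule below; the value name `v` is arbitrary): the instantiator must
// supply the clock and an asynchronous reset explicitly, e.g.
//   val v = Module(new AsyncValidSync(3, "example_valid"))
//   v.clock := clock
//   v.reset := reset.asAsyncReset
//   v.io.in := true.B
//   // v.io.out rises a few cycles after reset deasserts, once the synchronizer fills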
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because dequeue can occur on the receiving side,
// then reset is allowed to happen, but the write side cannot know that the dequeue
// occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
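// Minimal end-to-end sketch (illustrative; `Crossing8` and its port names are
// assumptions): moving an 8-bit Decoupled stream from clock domain A to clock
// domain B. The q.io field names (enq_clock, enq_reset, enq, deq_clock,
// deq_reset, deq) are the ones used by the class above; the wrapper's own
// implicit clock/reset are left unused.
class Crossing8 extends Module {
  val io = IO(new Bundle {
    val clkA = Input(Clock())
    val rstA = Input(Bool())
    val clkB = Input(Clock())
    val rstB = Input(Bool())
    val enq  = Flipped(Decoupled(UInt(8.W)))
    val deq  = Decoupled(UInt(8.W))
  })
  val q = Module(new AsyncQueue(UInt(8.W), AsyncQueueParams(depth = 4)))
  q.io.enq_clock := io.clkA
  q.io.enq_reset := io.rstA
  q.io.deq_clock := io.clkB
  q.io.deq_reset := io.rstB
  q.io.enq <> io.enq
  io.deq <> q.io.deq
}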
| module AsyncValidSync_65( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_75 io_out_source_valid_1 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in_0), // @[AsyncQueue.scala:58:7]
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because dequeue can occur on the receiving side,
// then reset is allowed to happen, but the write side cannot know that the dequeue
// occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_60( // @[AsyncQueue.scala:58:7]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in = 1'h1; // @[ShiftReg.scala:45:23]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_60 io_out_source_valid_0 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
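// Worked reading (illustrative): for each client c the mapped term is true when
// either c does not own `source` (the check is vacuous for that client) or at
// least one of c's visibility address sets contains `address`; the outer
// reduce(_ && _) then requires the client that does own the source to be able
// to see the address.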
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address=== address,"'B' channel address changed with multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
// This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
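// Worked example (illustrative, not part of the original source):
//   size_to_numfullbits(3.U) === "b111".U   // 2^3 - 1
// Shifting this mask by (source << log_a_size_bus_size) selects the per-source
// slot inside the packed inflight_sizes / inflight_opcodes vectors below.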
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
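// Illustrative note (not part of the original source): each in-flight opcode and
// size is stored shifted left by one with the LSB set as a "slot occupied" flag
// (see a_opcodes_set_interm / a_sizes_set_interm below), which is why the
// *_lookup wires above shift right by 1 after masking. For example, assuming
// TLMessages.Get has opcode 4, it is stored as "b1001".U and
// responseMap(4) === TLMessages.AccessAckData.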
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
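// Usage note (illustrative, not part of the original source): the watchdog above
// is armed from the simulator command line, e.g. +tilelink_timeout=100000 kills
// the run if any source ID stays in flight for that many cycles without an A or
// D beat; the default of 0 leaves the check disabled.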
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
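// Illustrative usage sketch (not part of the original source), assuming a module
// where an input and an output Decoupled port must fire in the same cycle:
//   val helper = DecoupledHelper(io.in.valid, io.out.ready)
//   io.out.valid := helper.fire(io.out.ready) // every condition except out.ready
//   io.in.ready := helper.fire(io.in.valid)   // every condition except in.valid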
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
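// Illustrative example (not part of the original source): look up a (hit, data)
// pair for a hypothetical key, falling back to the default when nothing matches:
//   val (hit, data) = MuxTLookup(key, (false.B, 0.U),
//     Seq(1.U -> (true.B, 10.U), 2.U -> (true.B, 20.U)))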
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
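// Illustrative example (not part of the original source), for an 8-bit x:
//   val (hi, lo) = Split(x, 4)     // hi = x(7,4), lo = x(3,0)
//   val (a, b, c) = Split(x, 6, 3) // a = x(7,6), b = x(5,3), c = x(2,0)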
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
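// Illustrative example (not part of the original source):
//   Majority("b1101".U) // true.B: three of the four bits are set
//   Majority("b0100".U) // false.B: only one of the four bits is set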
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
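// Illustrative usage sketch (not part of the original source); the names and
// docstrings are hypothetical:
//   val maxCycles = PlusArg("max_cycles", default = 0, docstring = "Stop after this many cycles; 0 disables")
//   PlusArg.timeout("progress_timeout", docstring = "Cycles without progress before failing")(idleCounter)
// A simulation then overrides the defaults with +max_cycles=1000000 and so on.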
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
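// Illustrative examples (not part of the original source), for x = "b0110".U:
//   x.rotateRight(1) === "b0011".U
//   x.rotateLeft(1) === "b1100".U
//   x.extract(3, 4) === 0.U // zero-width extract returns 0.U instead of erroring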
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
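// Illustrative examples (not part of the original source):
//   UIntToOH1(3.U, 8) === "b00000111".U // mask of the 3 low bits
//   OH1ToOH("b0011".U) === "b00100".U   // one-hot of the next-higher position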
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
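// Illustrative examples (not part of the original source):
//   leftOR("b00100".U) === "b11100".U  // each 1 propagates toward the high bits
//   rightOR("b00100".U) === "b00111".U // each 1 propagates toward the low bits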
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
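// Illustrative example (not part of the original source): IdRange(4, 8) covers
// source IDs 4 to 7; contains(5) is true, and contains(x: UInt) reduces to the
// prefix comparison (x >> 2) === 1.U, because IDs 4..7 differ only in their low
// two bits.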
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
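// Illustrative examples (not part of the original source):
//   TransferSizes(4, 64).contains(16)                       // true: a power of two within [4, 64]
//   TransferSizes(4, 64).intersect(TransferSizes(32, 256))  // TransferSizes(32, 64)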
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
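// Illustrative example (not part of the original source): a 12 KiB region starting
// at 0x1000 is not expressible as a single base/mask pair, so
//   AddressSet.misaligned(0x1000, 0x3000)
// yields Seq(AddressSet(0x1000, 0xfff), AddressSet(0x2000, 0x1fff)).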
}
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
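// Illustrative example (not part of the original source): with beatBytes = 8, a
// PutFullData of size 5 (32 bytes) carries 4 beats, so firstlastHelper reports
// first on beat 0, last on beat 3, and count steps through 0, 1, 2, 3 across the
// burst; a data-less message such as Get is a single beat with first === last.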
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
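  // Hedged usage sketch (the quiescence-check wiring is an assumption): a watchdog can
  // observe
  //   val (in_flight, next_in_flight) = edge.inFlight(bundle)
  // and check that in_flight returns to 0.U once traffic drains; the counter increments
  // on the first beat of each request and decrements on the last beat of each response,
  // across whichever of the A/B/C/D/E channels this edge actually carries.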
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
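  // Hedged sketch of the client-side acquire flow these constructors serve (the io,
  // register, and lgCacheBlockBytes names are illustrative assumptions, not part of
  // this file):
  //   val (legal, acquire) = edge.AcquireBlock(sourceId, blockAddress, lgCacheBlockBytes.U, TLPermissions.NtoT)
  //   io.mem.a.valid := wantBlock && legal
  //   io.mem.a.bits  := acquire
  //   // ...once the Grant/GrantData has been received on D...
  //   io.mem.e.valid := grantReceived
  //   io.mem.e.bits  := edge.GrantAck(io.mem.d.bits)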
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
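  // Hedged sketch of a manager-side response path (the io and register names are
  // illustrative assumptions): after accepting an AcquireBlock on A, a slave would
  // typically answer with
  //   io.in.d.bits := edge.Grant(sinkId, a_reg.source, a_reg.size, TLPermissions.toT, dataBeat, deniedReg, corruptReg)
  // and acknowledge a voluntary ReleaseData on C with
  //   io.in.d.bits := edge.ReleaseAck(io.in.c.bits)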
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
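// Generated Verilog below: TLMonitor_4 observes a single TL-C link (channels A through E)
// through input-only ports and contains only the assertion/coverage logic elaborated from
// the Chisel Monitor; it drives no handshake or data signals of its own.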
module TLMonitor_4( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input io_in_a_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_b_ready, // @[Monitor.scala:20:14]
input io_in_b_valid, // @[Monitor.scala:20:14]
input [1:0] io_in_b_bits_param, // @[Monitor.scala:20:14]
input io_in_b_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_b_bits_address, // @[Monitor.scala:20:14]
input io_in_c_ready, // @[Monitor.scala:20:14]
input io_in_c_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_c_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_c_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_c_bits_size, // @[Monitor.scala:20:14]
input io_in_c_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_c_bits_address, // @[Monitor.scala:20:14]
input [63:0] io_in_c_bits_data, // @[Monitor.scala:20:14]
input io_in_c_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input io_in_d_bits_source, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_e_ready, // @[Monitor.scala:20:14]
input io_in_e_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_e_bits_sink // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [3:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [31:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_b_ready_0 = io_in_b_ready; // @[Monitor.scala:36:7]
wire io_in_b_valid_0 = io_in_b_valid; // @[Monitor.scala:36:7]
wire [1:0] io_in_b_bits_param_0 = io_in_b_bits_param; // @[Monitor.scala:36:7]
wire io_in_b_bits_source_0 = io_in_b_bits_source; // @[Monitor.scala:36:7]
wire [31:0] io_in_b_bits_address_0 = io_in_b_bits_address; // @[Monitor.scala:36:7]
wire io_in_c_ready_0 = io_in_c_ready; // @[Monitor.scala:36:7]
wire io_in_c_valid_0 = io_in_c_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_c_bits_opcode_0 = io_in_c_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_c_bits_param_0 = io_in_c_bits_param; // @[Monitor.scala:36:7]
wire [3:0] io_in_c_bits_size_0 = io_in_c_bits_size; // @[Monitor.scala:36:7]
wire io_in_c_bits_source_0 = io_in_c_bits_source; // @[Monitor.scala:36:7]
wire [31:0] io_in_c_bits_address_0 = io_in_c_bits_address; // @[Monitor.scala:36:7]
wire [63:0] io_in_c_bits_data_0 = io_in_c_bits_data; // @[Monitor.scala:36:7]
wire io_in_c_bits_corrupt_0 = io_in_c_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
wire [3:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_e_ready_0 = io_in_e_ready; // @[Monitor.scala:36:7]
wire io_in_e_valid_0 = io_in_e_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_e_bits_sink_0 = io_in_e_bits_sink; // @[Monitor.scala:36:7]
wire sink_ok = 1'h1; // @[Monitor.scala:309:31]
wire mask_sub_sub_sub_0_1_1 = 1'h1; // @[Misc.scala:206:21]
wire mask_sub_sub_size_1 = 1'h1; // @[Misc.scala:209:26]
wire mask_sub_sub_0_1_1 = 1'h1; // @[Misc.scala:215:29]
wire mask_sub_sub_1_1_1 = 1'h1; // @[Misc.scala:215:29]
wire mask_sub_0_1_1 = 1'h1; // @[Misc.scala:215:29]
wire mask_sub_1_1_1 = 1'h1; // @[Misc.scala:215:29]
wire mask_sub_2_1_1 = 1'h1; // @[Misc.scala:215:29]
wire mask_sub_3_1_1 = 1'h1; // @[Misc.scala:215:29]
wire mask_size_1 = 1'h1; // @[Misc.scala:209:26]
wire mask_acc_8 = 1'h1; // @[Misc.scala:215:29]
wire mask_acc_9 = 1'h1; // @[Misc.scala:215:29]
wire mask_acc_10 = 1'h1; // @[Misc.scala:215:29]
wire mask_acc_11 = 1'h1; // @[Misc.scala:215:29]
wire mask_acc_12 = 1'h1; // @[Misc.scala:215:29]
wire mask_acc_13 = 1'h1; // @[Misc.scala:215:29]
wire mask_acc_14 = 1'h1; // @[Misc.scala:215:29]
wire mask_acc_15 = 1'h1; // @[Misc.scala:215:29]
wire sink_ok_1 = 1'h1; // @[Monitor.scala:367:31]
wire _b_first_beats1_opdata_T = 1'h1; // @[Edges.scala:97:37]
wire _b_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire b_first_last = 1'h1; // @[Edges.scala:232:33]
wire [3:0] io_in_b_bits_size = 4'h6; // @[Monitor.scala:36:7]
wire [3:0] _mask_sizeOH_T_3 = 4'h6; // @[Misc.scala:202:34]
wire [2:0] io_in_b_bits_opcode = 3'h6; // @[Monitor.scala:36:7]
wire [7:0] io_in_b_bits_mask = 8'hFF; // @[Monitor.scala:36:7]
wire [7:0] mask_1 = 8'hFF; // @[Misc.scala:222:10]
wire [63:0] io_in_b_bits_data = 64'h0; // @[Monitor.scala:36:7]
wire io_in_b_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire mask_sub_size_1 = 1'h0; // @[Misc.scala:209:26]
wire _mask_sub_acc_T_4 = 1'h0; // @[Misc.scala:215:38]
wire _mask_sub_acc_T_5 = 1'h0; // @[Misc.scala:215:38]
wire _mask_sub_acc_T_6 = 1'h0; // @[Misc.scala:215:38]
wire _mask_sub_acc_T_7 = 1'h0; // @[Misc.scala:215:38]
wire _legal_source_T_2 = 1'h0; // @[Mux.scala:30:73]
wire b_first_beats1_opdata = 1'h0; // @[Edges.scala:97:28]
wire [15:0] _a_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hFF; // @[Monitor.scala:612:57]
wire [15:0] _c_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hFF; // @[Monitor.scala:724:57]
wire [16:0] _a_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hFF; // @[Monitor.scala:612:57]
wire [16:0] _c_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hFF; // @[Monitor.scala:724:57]
wire [15:0] _a_size_lookup_T_3 = 16'h100; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h100; // @[Monitor.scala:612:51]
wire [15:0] _c_size_lookup_T_3 = 16'h100; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h100; // @[Monitor.scala:724:51]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [3:0] _mask_sizeOH_T_4 = 4'h4; // @[OneHot.scala:65:12]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [2:0] _mask_sizeOH_T_5 = 3'h4; // @[OneHot.scala:65:27]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] mask_sizeOH_1 = 3'h5; // @[Misc.scala:202:81]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [8:0] b_first_beats1 = 9'h0; // @[Edges.scala:221:14]
wire [8:0] b_first_count = 9'h0; // @[Edges.scala:234:25]
wire [8:0] b_first_beats1_decode = 9'h7; // @[Edges.scala:220:59]
wire [11:0] is_aligned_mask_1 = 12'h3F; // @[package.scala:243:46]
wire [11:0] _b_first_beats1_decode_T_2 = 12'h3F; // @[package.scala:243:46]
wire [11:0] _is_aligned_mask_T_3 = 12'hFC0; // @[package.scala:243:76]
wire [11:0] _b_first_beats1_decode_T_1 = 12'hFC0; // @[package.scala:243:76]
wire [26:0] _is_aligned_mask_T_2 = 27'h3FFC0; // @[package.scala:243:71]
wire [26:0] _b_first_beats1_decode_T = 27'h3FFC0; // @[package.scala:243:71]
wire [3:0] mask_lo_1 = 4'hF; // @[Misc.scala:222:10]
wire [3:0] mask_hi_1 = 4'hF; // @[Misc.scala:222:10]
wire [1:0] mask_lo_lo_1 = 2'h3; // @[Misc.scala:222:10]
wire [1:0] mask_lo_hi_1 = 2'h3; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo_1 = 2'h3; // @[Misc.scala:222:10]
wire [1:0] mask_hi_hi_1 = 2'h3; // @[Misc.scala:222:10]
wire [1:0] mask_sizeOH_shiftAmount_1 = 2'h2; // @[OneHot.scala:64:49]
wire [3:0] _a_size_lookup_T_2 = 4'h8; // @[Monitor.scala:641:117]
wire [3:0] _d_sizes_clr_T = 4'h8; // @[Monitor.scala:681:48]
wire [3:0] _c_size_lookup_T_2 = 4'h8; // @[Monitor.scala:750:119]
wire [3:0] _d_sizes_clr_T_6 = 4'h8; // @[Monitor.scala:791:48]
wire [3:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire _source_ok_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire _legal_source_T_1 = io_in_b_bits_source_0; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T = io_in_b_bits_address_0; // @[Monitor.scala:36:7]
wire _source_ok_T_5 = io_in_c_bits_source_0; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_100 = io_in_c_bits_address_0; // @[Monitor.scala:36:7]
wire _source_ok_T_3 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_T = ~io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire _source_ok_WIRE_1 = _source_ok_T_1; // @[Parameters.scala:1138:31]
wire source_ok = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire [26:0] _GEN = 27'hFFF << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [26:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [26:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [26:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [11:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [31:0] _is_aligned_T = {20'h0, io_in_a_bits_address_0[11:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 32'h0; // @[Edges.scala:21:{16,24}]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 4'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire _source_ok_T_2 = ~io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_2; // @[Parameters.scala:1138:31]
wire _source_ok_WIRE_1_1 = _source_ok_T_3; // @[Parameters.scala:1138:31]
wire source_ok_1 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire [32:0] _address_ok_T_1 = {1'h0, _address_ok_T}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_2 = _address_ok_T_1 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_3 = _address_ok_T_2; // @[Parameters.scala:137:46]
wire _address_ok_T_4 = _address_ok_T_3 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_0 = _address_ok_T_4; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_5 = {io_in_b_bits_address_0[31:13], io_in_b_bits_address_0[12:0] ^ 13'h1000}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_6 = {1'h0, _address_ok_T_5}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_7 = _address_ok_T_6 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_8 = _address_ok_T_7; // @[Parameters.scala:137:46]
wire _address_ok_T_9 = _address_ok_T_8 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1 = _address_ok_T_9; // @[Parameters.scala:612:40]
wire [13:0] _GEN_0 = io_in_b_bits_address_0[13:0] ^ 14'h3000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_10 = {io_in_b_bits_address_0[31:14], _GEN_0}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_11 = {1'h0, _address_ok_T_10}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_12 = _address_ok_T_11 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_13 = _address_ok_T_12; // @[Parameters.scala:137:46]
wire _address_ok_T_14 = _address_ok_T_13 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_2 = _address_ok_T_14; // @[Parameters.scala:612:40]
wire [16:0] _GEN_1 = io_in_b_bits_address_0[16:0] ^ 17'h10000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_15 = {io_in_b_bits_address_0[31:17], _GEN_1}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_16 = {1'h0, _address_ok_T_15}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_17 = _address_ok_T_16 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_18 = _address_ok_T_17; // @[Parameters.scala:137:46]
wire _address_ok_T_19 = _address_ok_T_18 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_3 = _address_ok_T_19; // @[Parameters.scala:612:40]
wire [17:0] _GEN_2 = io_in_b_bits_address_0[17:0] ^ 18'h20000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_20 = {io_in_b_bits_address_0[31:18], _GEN_2}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_21 = {1'h0, _address_ok_T_20}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_22 = _address_ok_T_21 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_23 = _address_ok_T_22; // @[Parameters.scala:137:46]
wire _address_ok_T_24 = _address_ok_T_23 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_4 = _address_ok_T_24; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_25 = {io_in_b_bits_address_0[31:18], io_in_b_bits_address_0[17:0] ^ 18'h21000}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_26 = {1'h0, _address_ok_T_25}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_27 = _address_ok_T_26 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_28 = _address_ok_T_27; // @[Parameters.scala:137:46]
wire _address_ok_T_29 = _address_ok_T_28 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_5 = _address_ok_T_29; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_30 = {io_in_b_bits_address_0[31:18], io_in_b_bits_address_0[17:0] ^ 18'h22000}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_31 = {1'h0, _address_ok_T_30}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_32 = _address_ok_T_31 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_33 = _address_ok_T_32; // @[Parameters.scala:137:46]
wire _address_ok_T_34 = _address_ok_T_33 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_6 = _address_ok_T_34; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_35 = {io_in_b_bits_address_0[31:18], io_in_b_bits_address_0[17:0] ^ 18'h23000}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_36 = {1'h0, _address_ok_T_35}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_37 = _address_ok_T_36 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_38 = _address_ok_T_37; // @[Parameters.scala:137:46]
wire _address_ok_T_39 = _address_ok_T_38 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_7 = _address_ok_T_39; // @[Parameters.scala:612:40]
wire [17:0] _GEN_3 = io_in_b_bits_address_0[17:0] ^ 18'h24000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_40 = {io_in_b_bits_address_0[31:18], _GEN_3}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_41 = {1'h0, _address_ok_T_40}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_42 = _address_ok_T_41 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_43 = _address_ok_T_42; // @[Parameters.scala:137:46]
wire _address_ok_T_44 = _address_ok_T_43 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_8 = _address_ok_T_44; // @[Parameters.scala:612:40]
wire [20:0] _GEN_4 = io_in_b_bits_address_0[20:0] ^ 21'h100000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_45 = {io_in_b_bits_address_0[31:21], _GEN_4}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_46 = {1'h0, _address_ok_T_45}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_47 = _address_ok_T_46 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_48 = _address_ok_T_47; // @[Parameters.scala:137:46]
wire _address_ok_T_49 = _address_ok_T_48 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_9 = _address_ok_T_49; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_50 = {io_in_b_bits_address_0[31:21], io_in_b_bits_address_0[20:0] ^ 21'h110000}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_51 = {1'h0, _address_ok_T_50}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_52 = _address_ok_T_51 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_53 = _address_ok_T_52; // @[Parameters.scala:137:46]
wire _address_ok_T_54 = _address_ok_T_53 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_10 = _address_ok_T_54; // @[Parameters.scala:612:40]
wire [25:0] _GEN_5 = io_in_b_bits_address_0[25:0] ^ 26'h2000000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_55 = {io_in_b_bits_address_0[31:26], _GEN_5}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_56 = {1'h0, _address_ok_T_55}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_57 = _address_ok_T_56 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_58 = _address_ok_T_57; // @[Parameters.scala:137:46]
wire _address_ok_T_59 = _address_ok_T_58 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_11 = _address_ok_T_59; // @[Parameters.scala:612:40]
wire [25:0] _GEN_6 = io_in_b_bits_address_0[25:0] ^ 26'h2010000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_60 = {io_in_b_bits_address_0[31:26], _GEN_6}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_61 = {1'h0, _address_ok_T_60}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_62 = _address_ok_T_61 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_63 = _address_ok_T_62; // @[Parameters.scala:137:46]
wire _address_ok_T_64 = _address_ok_T_63 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_12 = _address_ok_T_64; // @[Parameters.scala:612:40]
wire [27:0] _GEN_7 = io_in_b_bits_address_0[27:0] ^ 28'h8000000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_65 = {io_in_b_bits_address_0[31:28], _GEN_7}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_66 = {1'h0, _address_ok_T_65}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_67 = _address_ok_T_66 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_68 = _address_ok_T_67; // @[Parameters.scala:137:46]
wire _address_ok_T_69 = _address_ok_T_68 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_13 = _address_ok_T_69; // @[Parameters.scala:612:40]
wire [27:0] _GEN_8 = io_in_b_bits_address_0[27:0] ^ 28'hC000000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_70 = {io_in_b_bits_address_0[31:28], _GEN_8}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_71 = {1'h0, _address_ok_T_70}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_72 = _address_ok_T_71 & 33'h1FC000000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_73 = _address_ok_T_72; // @[Parameters.scala:137:46]
wire _address_ok_T_74 = _address_ok_T_73 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_14 = _address_ok_T_74; // @[Parameters.scala:612:40]
wire [28:0] _GEN_9 = io_in_b_bits_address_0[28:0] ^ 29'h10020000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_75 = {io_in_b_bits_address_0[31:29], _GEN_9}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_76 = {1'h0, _address_ok_T_75}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_77 = _address_ok_T_76 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_78 = _address_ok_T_77; // @[Parameters.scala:137:46]
wire _address_ok_T_79 = _address_ok_T_78 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_15 = _address_ok_T_79; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_80 = io_in_b_bits_address_0 ^ 32'h80000000; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_81 = {1'h0, _address_ok_T_80}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_82 = _address_ok_T_81 & 33'h1F0000000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_83 = _address_ok_T_82; // @[Parameters.scala:137:46]
wire _address_ok_T_84 = _address_ok_T_83 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_16 = _address_ok_T_84; // @[Parameters.scala:612:40]
wire _address_ok_T_85 = _address_ok_WIRE_0 | _address_ok_WIRE_1; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_86 = _address_ok_T_85 | _address_ok_WIRE_2; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_87 = _address_ok_T_86 | _address_ok_WIRE_3; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_88 = _address_ok_T_87 | _address_ok_WIRE_4; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_89 = _address_ok_T_88 | _address_ok_WIRE_5; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_90 = _address_ok_T_89 | _address_ok_WIRE_6; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_91 = _address_ok_T_90 | _address_ok_WIRE_7; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_92 = _address_ok_T_91 | _address_ok_WIRE_8; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_93 = _address_ok_T_92 | _address_ok_WIRE_9; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_94 = _address_ok_T_93 | _address_ok_WIRE_10; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_95 = _address_ok_T_94 | _address_ok_WIRE_11; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_96 = _address_ok_T_95 | _address_ok_WIRE_12; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_97 = _address_ok_T_96 | _address_ok_WIRE_13; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_98 = _address_ok_T_97 | _address_ok_WIRE_14; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_99 = _address_ok_T_98 | _address_ok_WIRE_15; // @[Parameters.scala:612:40, :636:64]
wire address_ok = _address_ok_T_99 | _address_ok_WIRE_16; // @[Parameters.scala:612:40, :636:64]
wire [31:0] _is_aligned_T_1 = {26'h0, io_in_b_bits_address_0[5:0]}; // @[Monitor.scala:36:7]
wire is_aligned_1 = _is_aligned_T_1 == 32'h0; // @[Edges.scala:21:{16,24}]
wire mask_sub_sub_bit_1 = io_in_b_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2_1 = mask_sub_sub_bit_1; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit_1 = ~mask_sub_sub_bit_1; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2_1 = mask_sub_sub_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T_2 = mask_sub_sub_0_2_1; // @[Misc.scala:214:27, :215:38]
wire _mask_sub_sub_acc_T_3 = mask_sub_sub_1_2_1; // @[Misc.scala:214:27, :215:38]
wire mask_sub_bit_1 = io_in_b_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit_1 = ~mask_sub_bit_1; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2_1 = mask_sub_sub_0_2_1 & mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27]
wire mask_sub_1_2_1 = mask_sub_sub_0_2_1 & mask_sub_bit_1; // @[Misc.scala:210:26, :214:27]
wire mask_sub_2_2_1 = mask_sub_sub_1_2_1 & mask_sub_nbit_1; // @[Misc.scala:211:20, :214:27]
wire mask_sub_3_2_1 = mask_sub_sub_1_2_1 & mask_sub_bit_1; // @[Misc.scala:210:26, :214:27]
wire mask_bit_1 = io_in_b_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit_1 = ~mask_bit_1; // @[Misc.scala:210:26, :211:20]
wire mask_eq_8 = mask_sub_0_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_8 = mask_eq_8; // @[Misc.scala:214:27, :215:38]
wire mask_eq_9 = mask_sub_0_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_9 = mask_eq_9; // @[Misc.scala:214:27, :215:38]
wire mask_eq_10 = mask_sub_1_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_10 = mask_eq_10; // @[Misc.scala:214:27, :215:38]
wire mask_eq_11 = mask_sub_1_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_11 = mask_eq_11; // @[Misc.scala:214:27, :215:38]
wire mask_eq_12 = mask_sub_2_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_12 = mask_eq_12; // @[Misc.scala:214:27, :215:38]
wire mask_eq_13 = mask_sub_2_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_13 = mask_eq_13; // @[Misc.scala:214:27, :215:38]
wire mask_eq_14 = mask_sub_3_2_1 & mask_nbit_1; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_14 = mask_eq_14; // @[Misc.scala:214:27, :215:38]
wire mask_eq_15 = mask_sub_3_2_1 & mask_bit_1; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_15 = mask_eq_15; // @[Misc.scala:214:27, :215:38]
wire _legal_source_T = ~io_in_b_bits_source_0; // @[Monitor.scala:36:7]
wire _legal_source_WIRE_0 = _legal_source_T; // @[Parameters.scala:1138:31]
wire _legal_source_WIRE_1 = _legal_source_T_1; // @[Parameters.scala:1138:31]
wire _legal_source_T_3 = _legal_source_WIRE_1; // @[Mux.scala:30:73]
wire _legal_source_T_4 = _legal_source_T_3; // @[Mux.scala:30:73]
wire _legal_source_WIRE_1_0 = _legal_source_T_4; // @[Mux.scala:30:73]
wire legal_source = _legal_source_WIRE_1_0 == io_in_b_bits_source_0; // @[Mux.scala:30:73]
wire _source_ok_T_4 = ~io_in_c_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_2_0 = _source_ok_T_4; // @[Parameters.scala:1138:31]
wire _source_ok_WIRE_2_1 = _source_ok_T_5; // @[Parameters.scala:1138:31]
wire source_ok_2 = _source_ok_WIRE_2_0 | _source_ok_WIRE_2_1; // @[Parameters.scala:1138:31, :1139:46]
wire [26:0] _GEN_10 = 27'hFFF << io_in_c_bits_size_0; // @[package.scala:243:71]
wire [26:0] _is_aligned_mask_T_4; // @[package.scala:243:71]
assign _is_aligned_mask_T_4 = _GEN_10; // @[package.scala:243:71]
wire [26:0] _c_first_beats1_decode_T; // @[package.scala:243:71]
assign _c_first_beats1_decode_T = _GEN_10; // @[package.scala:243:71]
wire [26:0] _c_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _c_first_beats1_decode_T_3 = _GEN_10; // @[package.scala:243:71]
wire [11:0] _is_aligned_mask_T_5 = _is_aligned_mask_T_4[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] is_aligned_mask_2 = ~_is_aligned_mask_T_5; // @[package.scala:243:{46,76}]
wire [31:0] _is_aligned_T_2 = {20'h0, io_in_c_bits_address_0[11:0] & is_aligned_mask_2}; // @[package.scala:243:46]
wire is_aligned_2 = _is_aligned_T_2 == 32'h0; // @[Edges.scala:21:{16,24}]
wire [32:0] _address_ok_T_101 = {1'h0, _address_ok_T_100}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_102 = _address_ok_T_101 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_103 = _address_ok_T_102; // @[Parameters.scala:137:46]
wire _address_ok_T_104 = _address_ok_T_103 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_0 = _address_ok_T_104; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_105 = {io_in_c_bits_address_0[31:13], io_in_c_bits_address_0[12:0] ^ 13'h1000}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_106 = {1'h0, _address_ok_T_105}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_107 = _address_ok_T_106 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_108 = _address_ok_T_107; // @[Parameters.scala:137:46]
wire _address_ok_T_109 = _address_ok_T_108 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_1 = _address_ok_T_109; // @[Parameters.scala:612:40]
wire [13:0] _GEN_11 = io_in_c_bits_address_0[13:0] ^ 14'h3000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_110 = {io_in_c_bits_address_0[31:14], _GEN_11}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_111 = {1'h0, _address_ok_T_110}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_112 = _address_ok_T_111 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_113 = _address_ok_T_112; // @[Parameters.scala:137:46]
wire _address_ok_T_114 = _address_ok_T_113 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_2 = _address_ok_T_114; // @[Parameters.scala:612:40]
wire [16:0] _GEN_12 = io_in_c_bits_address_0[16:0] ^ 17'h10000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_115 = {io_in_c_bits_address_0[31:17], _GEN_12}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_116 = {1'h0, _address_ok_T_115}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_117 = _address_ok_T_116 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_118 = _address_ok_T_117; // @[Parameters.scala:137:46]
wire _address_ok_T_119 = _address_ok_T_118 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_3 = _address_ok_T_119; // @[Parameters.scala:612:40]
wire [17:0] _GEN_13 = io_in_c_bits_address_0[17:0] ^ 18'h20000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_120 = {io_in_c_bits_address_0[31:18], _GEN_13}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_121 = {1'h0, _address_ok_T_120}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_122 = _address_ok_T_121 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_123 = _address_ok_T_122; // @[Parameters.scala:137:46]
wire _address_ok_T_124 = _address_ok_T_123 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_4 = _address_ok_T_124; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_125 = {io_in_c_bits_address_0[31:18], io_in_c_bits_address_0[17:0] ^ 18'h21000}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_126 = {1'h0, _address_ok_T_125}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_127 = _address_ok_T_126 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_128 = _address_ok_T_127; // @[Parameters.scala:137:46]
wire _address_ok_T_129 = _address_ok_T_128 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_5 = _address_ok_T_129; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_130 = {io_in_c_bits_address_0[31:18], io_in_c_bits_address_0[17:0] ^ 18'h22000}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_131 = {1'h0, _address_ok_T_130}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_132 = _address_ok_T_131 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_133 = _address_ok_T_132; // @[Parameters.scala:137:46]
wire _address_ok_T_134 = _address_ok_T_133 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_6 = _address_ok_T_134; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_135 = {io_in_c_bits_address_0[31:18], io_in_c_bits_address_0[17:0] ^ 18'h23000}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_136 = {1'h0, _address_ok_T_135}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_137 = _address_ok_T_136 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_138 = _address_ok_T_137; // @[Parameters.scala:137:46]
wire _address_ok_T_139 = _address_ok_T_138 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_7 = _address_ok_T_139; // @[Parameters.scala:612:40]
wire [17:0] _GEN_14 = io_in_c_bits_address_0[17:0] ^ 18'h24000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_140 = {io_in_c_bits_address_0[31:18], _GEN_14}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_141 = {1'h0, _address_ok_T_140}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_142 = _address_ok_T_141 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_143 = _address_ok_T_142; // @[Parameters.scala:137:46]
wire _address_ok_T_144 = _address_ok_T_143 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_8 = _address_ok_T_144; // @[Parameters.scala:612:40]
wire [20:0] _GEN_15 = io_in_c_bits_address_0[20:0] ^ 21'h100000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_145 = {io_in_c_bits_address_0[31:21], _GEN_15}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_146 = {1'h0, _address_ok_T_145}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_147 = _address_ok_T_146 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_148 = _address_ok_T_147; // @[Parameters.scala:137:46]
wire _address_ok_T_149 = _address_ok_T_148 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_9 = _address_ok_T_149; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_150 = {io_in_c_bits_address_0[31:21], io_in_c_bits_address_0[20:0] ^ 21'h110000}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_151 = {1'h0, _address_ok_T_150}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_152 = _address_ok_T_151 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_153 = _address_ok_T_152; // @[Parameters.scala:137:46]
wire _address_ok_T_154 = _address_ok_T_153 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_10 = _address_ok_T_154; // @[Parameters.scala:612:40]
wire [25:0] _GEN_16 = io_in_c_bits_address_0[25:0] ^ 26'h2000000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_155 = {io_in_c_bits_address_0[31:26], _GEN_16}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_156 = {1'h0, _address_ok_T_155}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_157 = _address_ok_T_156 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_158 = _address_ok_T_157; // @[Parameters.scala:137:46]
wire _address_ok_T_159 = _address_ok_T_158 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_11 = _address_ok_T_159; // @[Parameters.scala:612:40]
wire [25:0] _GEN_17 = io_in_c_bits_address_0[25:0] ^ 26'h2010000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_160 = {io_in_c_bits_address_0[31:26], _GEN_17}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_161 = {1'h0, _address_ok_T_160}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_162 = _address_ok_T_161 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_163 = _address_ok_T_162; // @[Parameters.scala:137:46]
wire _address_ok_T_164 = _address_ok_T_163 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_12 = _address_ok_T_164; // @[Parameters.scala:612:40]
wire [27:0] _GEN_18 = io_in_c_bits_address_0[27:0] ^ 28'h8000000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_165 = {io_in_c_bits_address_0[31:28], _GEN_18}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_166 = {1'h0, _address_ok_T_165}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_167 = _address_ok_T_166 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_168 = _address_ok_T_167; // @[Parameters.scala:137:46]
wire _address_ok_T_169 = _address_ok_T_168 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_13 = _address_ok_T_169; // @[Parameters.scala:612:40]
wire [27:0] _GEN_19 = io_in_c_bits_address_0[27:0] ^ 28'hC000000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_170 = {io_in_c_bits_address_0[31:28], _GEN_19}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_171 = {1'h0, _address_ok_T_170}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_172 = _address_ok_T_171 & 33'h1FC000000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_173 = _address_ok_T_172; // @[Parameters.scala:137:46]
wire _address_ok_T_174 = _address_ok_T_173 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_14 = _address_ok_T_174; // @[Parameters.scala:612:40]
wire [28:0] _GEN_20 = io_in_c_bits_address_0[28:0] ^ 29'h10020000; // @[Monitor.scala:36:7]
wire [31:0] _address_ok_T_175 = {io_in_c_bits_address_0[31:29], _GEN_20}; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_176 = {1'h0, _address_ok_T_175}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_177 = _address_ok_T_176 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_178 = _address_ok_T_177; // @[Parameters.scala:137:46]
wire _address_ok_T_179 = _address_ok_T_178 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_15 = _address_ok_T_179; // @[Parameters.scala:612:40]
wire [31:0] _address_ok_T_180 = io_in_c_bits_address_0 ^ 32'h80000000; // @[Monitor.scala:36:7]
wire [32:0] _address_ok_T_181 = {1'h0, _address_ok_T_180}; // @[Parameters.scala:137:{31,41}]
wire [32:0] _address_ok_T_182 = _address_ok_T_181 & 33'h1F0000000; // @[Parameters.scala:137:{41,46}]
wire [32:0] _address_ok_T_183 = _address_ok_T_182; // @[Parameters.scala:137:46]
wire _address_ok_T_184 = _address_ok_T_183 == 33'h0; // @[Parameters.scala:137:{46,59}]
wire _address_ok_WIRE_1_16 = _address_ok_T_184; // @[Parameters.scala:612:40]
wire _address_ok_T_185 = _address_ok_WIRE_1_0 | _address_ok_WIRE_1_1; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_186 = _address_ok_T_185 | _address_ok_WIRE_1_2; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_187 = _address_ok_T_186 | _address_ok_WIRE_1_3; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_188 = _address_ok_T_187 | _address_ok_WIRE_1_4; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_189 = _address_ok_T_188 | _address_ok_WIRE_1_5; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_190 = _address_ok_T_189 | _address_ok_WIRE_1_6; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_191 = _address_ok_T_190 | _address_ok_WIRE_1_7; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_192 = _address_ok_T_191 | _address_ok_WIRE_1_8; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_193 = _address_ok_T_192 | _address_ok_WIRE_1_9; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_194 = _address_ok_T_193 | _address_ok_WIRE_1_10; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_195 = _address_ok_T_194 | _address_ok_WIRE_1_11; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_196 = _address_ok_T_195 | _address_ok_WIRE_1_12; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_197 = _address_ok_T_196 | _address_ok_WIRE_1_13; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_198 = _address_ok_T_197 | _address_ok_WIRE_1_14; // @[Parameters.scala:612:40, :636:64]
wire _address_ok_T_199 = _address_ok_T_198 | _address_ok_WIRE_1_15; // @[Parameters.scala:612:40, :636:64]
wire address_ok_1 = _address_ok_T_199 | _address_ok_WIRE_1_16; // @[Parameters.scala:612:40, :636:64]
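// address_ok_1: the C-channel address is legal if it falls inside any of the
// visible address windows tested above.  Each _address_ok_WIRE_1_N term uses
// the containment idiom from Parameters.scala:137 -- XOR the address with the
// window base, mask off the in-window offset bits, and compare the remainder
// to zero.  A stand-alone sketch of the last test (hypothetical wire name,
// for illustration only); the 33'h1F0000000 mask makes this a 256 MiB window
// based at 0x8000_0000:
//   wire in_last_window =
//     ((({1'h0, io_in_c_bits_address_0}) ^ 33'h80000000) & 33'h1F0000000) == 33'h0;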
wire _T_2656 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_2656; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_2656; // @[Decoupled.scala:51:35]
wire [11:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [8:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] a_first_counter; // @[Edges.scala:229:27]
wire [9:0] _a_first_counter1_T = {1'h0, a_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] a_first_counter1 = _a_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [8:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
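// Beat tracking for the A channel (the firstlast logic from Edges.scala):
// a_first_beats1_decode is the burst length in beats minus one, derived from
// a_bits_size (the [11:3] slice implies an 8-byte data beat), and
// a_first_beats1 is forced to zero for opcodes without a data payload
// (opcode[2] set).  a_first_counter counts the remaining beats down, so
// a_first is high on the first beat of a message and a_first_last on its
// final beat.  The registers below capture the A-channel fields on that
// first beat so the remaining beats and the eventual D response can be
// checked against them.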
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [3:0] size; // @[Monitor.scala:389:22]
reg source; // @[Monitor.scala:390:22]
reg [31:0] address; // @[Monitor.scala:391:22]
wire _T_2730 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_2730; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_2730; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_2730; // @[Decoupled.scala:51:35]
wire _d_first_T_3; // @[Decoupled.scala:51:35]
assign _d_first_T_3 = _T_2730; // @[Decoupled.scala:51:35]
wire [26:0] _GEN_21 = 27'hFFF << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_21; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_21; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_21; // @[package.scala:243:71]
wire [26:0] _d_first_beats1_decode_T_9; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_9 = _GEN_21; // @[package.scala:243:71]
wire [11:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_3 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [8:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T = {1'h0, d_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1 = _d_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
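// The same beat-tracking structure is instantiated for the D channel
// (d_first_*); here the payload test is opcode[0], which is set only for
// AccessAckData and GrantData.  The registers below hold the D-channel
// fields captured on the first beat of a response.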
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [3:0] size_1; // @[Monitor.scala:540:22]
reg source_1; // @[Monitor.scala:541:22]
reg [2:0] sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
wire _b_first_T = io_in_b_ready_0 & io_in_b_valid_0; // @[Decoupled.scala:51:35]
wire b_first_done = _b_first_T; // @[Decoupled.scala:51:35]
reg [8:0] b_first_counter; // @[Edges.scala:229:27]
wire [9:0] _b_first_counter1_T = {1'h0, b_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] b_first_counter1 = _b_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire b_first = b_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _b_first_last_T = b_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire [8:0] _b_first_count_T = ~b_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] _b_first_counter_T = b_first ? 9'h0 : b_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21]
reg [1:0] param_2; // @[Monitor.scala:411:22]
reg source_2; // @[Monitor.scala:413:22]
reg [31:0] address_1; // @[Monitor.scala:414:22]
wire _T_2727 = io_in_c_ready_0 & io_in_c_valid_0; // @[Decoupled.scala:51:35]
wire _c_first_T; // @[Decoupled.scala:51:35]
assign _c_first_T = _T_2727; // @[Decoupled.scala:51:35]
wire _c_first_T_1; // @[Decoupled.scala:51:35]
assign _c_first_T_1 = _T_2727; // @[Decoupled.scala:51:35]
wire [11:0] _c_first_beats1_decode_T_1 = _c_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _c_first_beats1_decode_T_2 = ~_c_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [8:0] c_first_beats1_decode = _c_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46]
wire c_first_beats1_opdata = io_in_c_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire c_first_beats1_opdata_1 = io_in_c_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [8:0] c_first_beats1 = c_first_beats1_opdata ? c_first_beats1_decode : 9'h0; // @[Edges.scala:102:36, :220:59, :221:14]
reg [8:0] c_first_counter; // @[Edges.scala:229:27]
wire [9:0] _c_first_counter1_T = {1'h0, c_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] c_first_counter1 = _c_first_counter1_T[8:0]; // @[Edges.scala:230:28]
wire c_first = c_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _c_first_last_T = c_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _c_first_last_T_1 = c_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire c_first_last = _c_first_last_T | _c_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire c_first_done = c_first_last & _c_first_T; // @[Decoupled.scala:51:35]
wire [8:0] _c_first_count_T = ~c_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [8:0] c_first_count = c_first_beats1 & _c_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _c_first_counter_T = c_first ? c_first_beats1 : c_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_3; // @[Monitor.scala:515:22]
reg [2:0] param_3; // @[Monitor.scala:516:22]
reg [3:0] size_3; // @[Monitor.scala:517:22]
reg source_3; // @[Monitor.scala:518:22]
reg [31:0] address_2; // @[Monitor.scala:519:22]
reg [1:0] inflight; // @[Monitor.scala:614:27]
reg [7:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [15:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [11:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [8:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [8:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] a_first_counter1_1 = _a_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [11:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire [8:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1_1 = _d_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [1:0] a_set; // @[Monitor.scala:626:34]
wire [1:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [7:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [15:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [3:0] _GEN_22 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [3:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_22; // @[Monitor.scala:637:69]
wire [3:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_22; // @[Monitor.scala:637:69, :680:101]
wire [3:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_22; // @[Monitor.scala:637:69, :749:69]
wire [3:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_22; // @[Monitor.scala:637:69, :790:101]
wire [7:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [15:0] _a_opcode_lookup_T_6 = {8'h0, _a_opcode_lookup_T_1 & 8'hF}; // @[Monitor.scala:637:{44,97}]
wire [15:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [7:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [3:0] _GEN_23 = {io_in_d_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :641:65]
wire [3:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_23; // @[Monitor.scala:641:65]
wire [3:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_23; // @[Monitor.scala:641:65, :681:99]
wire [3:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_23; // @[Monitor.scala:641:65, :750:67]
wire [3:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_23; // @[Monitor.scala:641:65, :791:99]
wire [15:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [15:0] _a_size_lookup_T_6 = _a_size_lookup_T_1 & 16'hFF; // @[Monitor.scala:641:{40,91}]
wire [15:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[15:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[7:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [4:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [1:0] _GEN_24 = {1'h0, io_in_a_bits_source_0}; // @[OneHot.scala:58:35]
wire [1:0] _GEN_25 = 2'h1 << _GEN_24; // @[OneHot.scala:58:35]
wire [1:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_25; // @[OneHot.scala:58:35]
wire [1:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_25; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T : 2'h0; // @[OneHot.scala:58:35]
wire _T_2582 = _T_2656 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_2582 ? _a_set_T : 2'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_2582 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [4:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [4:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_2582 ? _a_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [3:0] _a_opcodes_set_T = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [18:0] _a_opcodes_set_T_1 = {15'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_2582 ? _a_opcodes_set_T_1[7:0] : 8'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [3:0] _a_sizes_set_T = {io_in_a_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :660:77]
wire [19:0] _a_sizes_set_T_1 = {15'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :660:{52,77}]
assign a_sizes_set = _T_2582 ? _a_sizes_set_T_1[15:0] : 16'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
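// A-channel inflight bookkeeping: on the first beat of an accepted A request
// (_T_2582), a_set one-hot-encodes the source ID, a_opcodes_set packs
// {opcode, 1'b1} into a 4-bit field at offset source*4, and a_sizes_set packs
// {size, 1'b1} into an 8-bit field at offset source*8.  The trailing 1 marks
// the entry as valid so a later D beat can look the opcode/size back up via
// a_opcode_lookup / a_size_lookup above.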
wire [1:0] d_clr; // @[Monitor.scala:664:34]
wire [1:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [7:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [15:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_26 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_26; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_26; // @[Monitor.scala:673:46, :783:46]
wire _T_2628 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [1:0] _GEN_27 = {1'h0, io_in_d_bits_source_0}; // @[OneHot.scala:58:35]
wire [1:0] _GEN_28 = 2'h1 << _GEN_27; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_28; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_28; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_28; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_28; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_2628 & ~d_release_ack ? _d_clr_wo_ready_T : 2'h0; // @[OneHot.scala:58:35]
wire _T_2597 = _T_2730 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_2597 ? _d_clr_T : 2'h0; // @[OneHot.scala:58:35]
wire [30:0] _d_opcodes_clr_T_5 = 31'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_2597 ? _d_opcodes_clr_T_5[7:0] : 8'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [30:0] _d_sizes_clr_T_5 = 31'hFF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_2597 ? _d_sizes_clr_T_5[15:0] : 16'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [1:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [1:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [1:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [7:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [7:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [7:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [15:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [15:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [15:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
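// Inflight next-state: inflight' = (inflight | a_set) & ~d_clr, and the
// opcode/size side tables are updated the same way.  d_clr fires on the
// first beat of any D response other than ReleaseAck, so an entry is set by
// the A request and cleared by its matching D response.  The watchdog below
// counts cycles toward the monitor's timeout check while requests remain
// outstanding.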
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [1:0] inflight_1; // @[Monitor.scala:726:35]
reg [7:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
reg [15:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [11:0] _c_first_beats1_decode_T_4 = _c_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _c_first_beats1_decode_T_5 = ~_c_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [8:0] c_first_beats1_decode_1 = _c_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46]
wire [8:0] c_first_beats1_1 = c_first_beats1_opdata_1 ? c_first_beats1_decode_1 : 9'h0; // @[Edges.scala:102:36, :220:59, :221:14]
reg [8:0] c_first_counter_1; // @[Edges.scala:229:27]
wire [9:0] _c_first_counter1_T_1 = {1'h0, c_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] c_first_counter1_1 = _c_first_counter1_T_1[8:0]; // @[Edges.scala:230:28]
wire c_first_1 = c_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _c_first_last_T_2 = c_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _c_first_last_T_3 = c_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire c_first_last_1 = _c_first_last_T_2 | _c_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire c_first_done_1 = c_first_last_1 & _c_first_T_1; // @[Decoupled.scala:51:35]
wire [8:0] _c_first_count_T_1 = ~c_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [8:0] c_first_count_1 = c_first_beats1_1 & _c_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _c_first_counter_T_1 = c_first_1 ? c_first_beats1_1 : c_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [11:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[11:3]; // @[package.scala:243:46]
wire [8:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1_2 = _d_first_counter1_T_2[8:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [1:0] c_set; // @[Monitor.scala:738:34]
wire [1:0] c_set_wo_ready; // @[Monitor.scala:739:34]
wire [7:0] c_opcodes_set; // @[Monitor.scala:740:34]
wire [15:0] c_sizes_set; // @[Monitor.scala:741:34]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [7:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [7:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [15:0] _c_opcode_lookup_T_6 = {8'h0, _c_opcode_lookup_T_1 & 8'hF}; // @[Monitor.scala:749:{44,97}]
wire [15:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [15:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [15:0] _c_size_lookup_T_6 = _c_size_lookup_T_1 & 16'hFF; // @[Monitor.scala:750:{42,93}]
wire [15:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[15:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[7:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [3:0] c_opcodes_set_interm; // @[Monitor.scala:754:40]
wire [4:0] c_sizes_set_interm; // @[Monitor.scala:755:40]
wire _same_cycle_resp_T_3 = io_in_c_valid_0 & c_first_1; // @[Monitor.scala:36:7, :759:26, :795:44]
wire _same_cycle_resp_T_4 = io_in_c_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _same_cycle_resp_T_5 = io_in_c_bits_opcode_0[1]; // @[Monitor.scala:36:7]
wire [1:0] _GEN_29 = {1'h0, io_in_c_bits_source_0}; // @[OneHot.scala:58:35]
wire [1:0] _GEN_30 = 2'h1 << _GEN_29; // @[OneHot.scala:58:35]
wire [1:0] _c_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _c_set_wo_ready_T = _GEN_30; // @[OneHot.scala:58:35]
wire [1:0] _c_set_T; // @[OneHot.scala:58:35]
assign _c_set_T = _GEN_30; // @[OneHot.scala:58:35]
assign c_set_wo_ready = _same_cycle_resp_T_3 & _same_cycle_resp_T_4 & _same_cycle_resp_T_5 ? _c_set_wo_ready_T : 2'h0; // @[OneHot.scala:58:35]
wire _T_2669 = _T_2727 & c_first_1 & _same_cycle_resp_T_4 & _same_cycle_resp_T_5; // @[Decoupled.scala:51:35]
assign c_set = _T_2669 ? _c_set_T : 2'h0; // @[OneHot.scala:58:35]
wire [3:0] _c_opcodes_set_interm_T = {io_in_c_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :765:53]
wire [3:0] _c_opcodes_set_interm_T_1 = {_c_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:765:{53,61}]
assign c_opcodes_set_interm = _T_2669 ? _c_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:754:40, :763:{25,36,70}, :765:{28,61}]
wire [4:0] _c_sizes_set_interm_T = {io_in_c_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :766:51]
wire [4:0] _c_sizes_set_interm_T_1 = {_c_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:766:{51,59}]
assign c_sizes_set_interm = _T_2669 ? _c_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:755:40, :763:{25,36,70}, :766:{28,59}]
wire [3:0] _c_opcodes_set_T = {1'h0, io_in_c_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :767:79]
wire [18:0] _c_opcodes_set_T_1 = {15'h0, c_opcodes_set_interm} << _c_opcodes_set_T; // @[Monitor.scala:754:40, :767:{54,79}]
assign c_opcodes_set = _T_2669 ? _c_opcodes_set_T_1[7:0] : 8'h0; // @[Monitor.scala:740:34, :763:{25,36,70}, :767:{28,54}]
wire [3:0] _c_sizes_set_T = {io_in_c_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :768:77]
wire [19:0] _c_sizes_set_T_1 = {15'h0, c_sizes_set_interm} << _c_sizes_set_T; // @[Monitor.scala:755:40, :768:{52,77}]
assign c_sizes_set = _T_2669 ? _c_sizes_set_T_1[15:0] : 16'h0; // @[Monitor.scala:741:34, :763:{25,36,70}, :768:{28,52}]
wire _c_probe_ack_T = io_in_c_bits_opcode_0 == 3'h4; // @[Monitor.scala:36:7, :772:47]
wire _c_probe_ack_T_1 = io_in_c_bits_opcode_0 == 3'h5; // @[Monitor.scala:36:7, :772:95]
wire c_probe_ack = _c_probe_ack_T | _c_probe_ack_T_1; // @[Monitor.scala:772:{47,71,95}]
wire [1:0] d_clr_1; // @[Monitor.scala:774:34]
wire [1:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [7:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [15:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_2700 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_2700 & d_release_ack_1 ? _d_clr_wo_ready_T_1 : 2'h0; // @[OneHot.scala:58:35]
wire _T_2682 = _T_2730 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_2682 ? _d_clr_T_1 : 2'h0; // @[OneHot.scala:58:35]
wire [30:0] _d_opcodes_clr_T_11 = 31'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_2682 ? _d_opcodes_clr_T_11[7:0] : 8'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [30:0] _d_sizes_clr_T_11 = 31'hFF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_2682 ? _d_sizes_clr_T_11[15:0] : 16'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
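// The second inflight table (inflight_1) tracks C-channel Release traffic:
// c_set is driven on the first beat of a C message with opcode bits [2:1]
// both set (Release and ReleaseData), and d_clr_1 clears the entry when the
// matching D-channel ReleaseAck (opcode 6) completes its first beat.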
wire _same_cycle_resp_T_6 = _same_cycle_resp_T_4 & _same_cycle_resp_T_5; // @[Edges.scala:68:{36,40,51}]
wire _same_cycle_resp_T_7 = _same_cycle_resp_T_3 & _same_cycle_resp_T_6; // @[Monitor.scala:795:{44,55}]
wire _same_cycle_resp_T_8 = io_in_c_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :795:113]
wire same_cycle_resp_1 = _same_cycle_resp_T_7 & _same_cycle_resp_T_8; // @[Monitor.scala:795:{55,88,113}]
wire [1:0] _inflight_T_3 = inflight_1 | c_set; // @[Monitor.scala:726:35, :738:34, :814:35]
wire [1:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [1:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [7:0] _inflight_opcodes_T_3 = inflight_opcodes_1 | c_opcodes_set; // @[Monitor.scala:727:35, :740:34, :815:43]
wire [7:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [7:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [15:0] _inflight_sizes_T_3 = inflight_sizes_1 | c_sizes_set; // @[Monitor.scala:728:35, :741:34, :816:41]
wire [15:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [15:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
wire [32:0] _watchdog_T_2 = {1'h0, watchdog_1} + 33'h1; // @[Monitor.scala:818:27, :823:26]
wire [31:0] _watchdog_T_3 = _watchdog_T_2[31:0]; // @[Monitor.scala:823:26]
reg [7:0] inflight_2; // @[Monitor.scala:828:27]
wire [11:0] _d_first_beats1_decode_T_10 = _d_first_beats1_decode_T_9[11:0]; // @[package.scala:243:{71,76}]
wire [11:0] _d_first_beats1_decode_T_11 = ~_d_first_beats1_decode_T_10; // @[package.scala:243:{46,76}]
wire [8:0] d_first_beats1_decode_3 = _d_first_beats1_decode_T_11[11:3]; // @[package.scala:243:46]
wire [8:0] d_first_beats1_3 = d_first_beats1_opdata_3 ? d_first_beats1_decode_3 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [8:0] d_first_counter_3; // @[Edges.scala:229:27]
wire [9:0] _d_first_counter1_T_3 = {1'h0, d_first_counter_3} - 10'h1; // @[Edges.scala:229:27, :230:28]
wire [8:0] d_first_counter1_3 = _d_first_counter1_T_3[8:0]; // @[Edges.scala:230:28]
wire d_first_3 = d_first_counter_3 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_6 = d_first_counter_3 == 9'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_7 = d_first_beats1_3 == 9'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_3 = _d_first_last_T_6 | _d_first_last_T_7; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_3 = d_first_last_3 & _d_first_T_3; // @[Decoupled.scala:51:35]
wire [8:0] _d_first_count_T_3 = ~d_first_counter1_3; // @[Edges.scala:230:28, :234:27]
wire [8:0] d_first_count_3 = d_first_beats1_3 & _d_first_count_T_3; // @[Edges.scala:221:14, :234:{25,27}]
wire [8:0] _d_first_counter_T_3 = d_first_3 ? d_first_beats1_3 : d_first_counter1_3; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [7:0] d_set; // @[Monitor.scala:833:25]
wire _T_2736 = _T_2730 & d_first_3 & io_in_d_bits_opcode_0[2] & ~(io_in_d_bits_opcode_0[1]); // @[Decoupled.scala:51:35]
wire [7:0] _GEN_31 = {5'h0, io_in_d_bits_sink_0}; // @[OneHot.scala:58:35]
wire [7:0] _d_set_T = 8'h1 << _GEN_31; // @[OneHot.scala:58:35]
assign d_set = _T_2736 ? _d_set_T : 8'h0; // @[OneHot.scala:58:35]
wire [7:0] e_clr; // @[Monitor.scala:839:25]
wire _T_2745 = io_in_e_ready_0 & io_in_e_valid_0; // @[Decoupled.scala:51:35]
wire [7:0] _GEN_32 = {5'h0, io_in_e_bits_sink_0}; // @[OneHot.scala:58:35]
wire [7:0] _e_clr_T = 8'h1 << _GEN_32; // @[OneHot.scala:58:35]
assign e_clr = _T_2745 ? _e_clr_T : 8'h0; // @[OneHot.scala:58:35] |
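// Sink-ID bookkeeping for the Grant/GrantAck handshake: d_set marks the
// D-channel sink ID when a Grant or GrantData (opcode[2] & ~opcode[1])
// completes its first beat, and e_clr clears that bit when the E-channel
// GrantAck carrying the same sink ID fires.  inflight_2 (8 bits, one per
// sink ID) holds the grants still awaiting acknowledgement.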
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
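// Note: with monitorDir == MonitorDirection.Monitor both helpers reduce to
// plain assertions.  Under formal verification, monAssert emits a property in
// the monitor's configured direction while assume uses monitorDir.flip, so
// one side of the link is checked and the other is constrained.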
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
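// visible(): for every client that could own this source ID, the address must
// lie inside that client's declared visibility ranges; clients that cannot
// have issued the source ID are skipped via the !contains(source) term.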
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID for which a response message " +
"is already pending (not received until current cycle) for a prior request message " +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the " +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the " +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be the same as a_size of the corresponding request " +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
// This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
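// Bookkeeping note: each source ID owns one slot of width a_opcode_bus_size in
// inflight_opcodes and one of width a_size_bus_size in inflight_sizes. A slot
// stores (value << 1) | 1, so its low bit marks the slot as occupied and an
// all-zero slot means "unset"; e.g. an in-flight Get (opcode 4) is recorded as
// (4 << 1) | 1 = 9, and the lookups above shift right by 1 to recover 4.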
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
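// Illustrative usage (hypothetical in/out Decoupled ports): gather all the
// ready/valid conditions once, then exclude the signal currently being driven:
// val helper = DecoupledHelper(in.valid, out.ready)
// out.valid := helper.fire(out.ready) // all conditions except out.ready
// in.ready := helper.fire(in.valid) // all conditions except in.valid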
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
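// Illustrative usage (hypothetical sel key): pick a (base, size) pair by key,
// falling back to the default when nothing matches:
// val (base, size) = MuxTLookup(sel, (0.U, 0.U),
// Seq(1.U -> (0x1000.U, 0x100.U),
// 2.U -> (0x2000.U, 0x200.U)))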
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
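// Example: Str("ok") packs the ASCII bytes into a 16-bit literal, 0x6F6B,
// suitable for embedding in printf output.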
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
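// Example: Majority("b0111".U) is true (three of four bits set), while
// Majority("b0011".U) is false (two of four is not a strict majority).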
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial block is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
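// Illustrative usage (hypothetical names): read a runtime-tunable flag and arm
// a timeout counter:
// val verbose = PlusArg("verbose", default = 0, docstring = "Enable extra printfs")
// PlusArg.timeout("max_cycles", docstring = "Kill after this many cycles")(cycleCount)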
}
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
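// Worked example: with n = 7, (5.U).addWrap(4.U, 7) yields 2.U ((5 + 4) % 7),
// and (2.U).subWrap(5.U, 7) yields 4.U ((2 - 5) mod 7).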
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
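// Worked example: UIntToOH1(2.U, 4) = "b0011".U (a thermometer mask below the
// index), and OH1ToOH("b0011".U) = "b0100".U, the one-hot bit just above it.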
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
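// Worked example: leftOR("b0100".U) = "b1100".U (ones spread toward the MSB),
// while rightOR("b0100".U) = "b0111".U (ones spread toward the LSB).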
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
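// Example: groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) returns
// Seq(1 -> List(1, 3), 0 -> List(2, 4)), preserving first-seen key order,
// unlike Seq.groupBy, which returns an unordered Map.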
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
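// Example: indexing adResponse with an A-channel opcode gives the expected
// D-channel response, e.g. adResponse(Get) is AccessAckData and
// adResponse(AcquireBlock) is Grant.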
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on an inwards path to) the global point of serialization.
* (B)ranch: the agent is on an outwards path from the point of serialization and holds a read-only copy.
* (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
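// Example: a ProbeAck that gives up a Trunk copy entirely reports the shrink
// TtoN, a client that held no copy reports NtoN, and a Grant conferring
// read-only permission caps the requester toB.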
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
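
// Hedged usage sketch (not from the original source): how IdRange's containment
// and overlap checks behave for plain Scala values. Object name is hypothetical.
object IdRangeExample {
  def main(args: Array[String]): Unit = {
    val a = IdRange(0, 4)                    // ids 0, 1, 2, 3
    val b = IdRange(2, 6)                    // ids 2, 3, 4, 5
    require(a.overlaps(b))                   // [0,4) and [2,6) share ids 2 and 3
    require(a.contains(3))                   // 3 lies inside [0,4)
    require(!a.contains(b))                  // containment needs a full superset
    require(a.size == 4 && !a.isEmpty)
  }
}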
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
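
// Hedged usage sketch (not from the original source): TransferSizes is an
// inclusive range of power-of-two byte sizes. Object name is hypothetical.
object TransferSizesExample {
  def main(args: Array[String]): Unit = {
    val t = TransferSizes(4, 64)
    require(t.contains(16))                              // power of two inside [4, 64]
    require(!t.contains(3))                              // 3 is not a power of two
    require(t.intersect(TransferSizes(32, 256)) == TransferSizes(32, 64))
    require(t.mincover(TransferSizes(1, 2)) == TransferSizes(1, 64))
    require(TransferSizes.none.none)                     // the empty range
  }
}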
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask selects the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
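
// Hedged usage sketch (not from the original source): an AddressSet is a base
// address plus a mask of don't-care bits, so it can describe both contiguous
// and strided regions. Object name is hypothetical.
object AddressSetExample {
  def main(args: Array[String]): Unit = {
    val s = AddressSet(0x200, 0xff)                  // 0x200 .. 0x2ff
    require(s.contains(BigInt(0x2ab)))
    require(!s.contains(BigInt(0x300)))
    require(s.alignment == BigInt(0x100) && s.contiguous)
    val strided = AddressSet(0x1000, 0xf0f)          // 0x1000-0x100f, 0x1100-0x110f, ...
    require(!strided.contiguous)
    require(strided.contains(BigInt(0x1305)))        // inside the 0x1300-0x130f stripe
  }
}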
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
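
// Hedged usage sketch (not from the original source): selecting a queue with
// BufferParams inside a small Chisel module. Module name is hypothetical;
// BufferParams.none would instead pass the stream through unbuffered.
class BufferedPass extends Module {
  val io = IO(new Bundle {
    val in  = Flipped(DecoupledIO(UInt(8.W)))
    val out = DecoupledIO(UInt(8.W))
  })
  io.out <> BufferParams.default(io.in)   // depth-2 queue, no flow/pipe
}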
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
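
// Hedged usage sketch (not from the original source): counting completed A
// messages with the first/last/done helpers. In real code `edge` comes from a
// diplomatic node; names here are hypothetical.
class BeatCounter(edge: TLEdge, params: TLBundleParameters) extends Module {
  val io = IO(new Bundle { val a = Flipped(Decoupled(new TLBundleA(params))) })
  io.a.ready := true.B
  // done pulses on the final beat of each (possibly multi-beat) A message
  val (a_first, a_last, a_done) = edge.firstlast(io.a.bits, io.a.fire)
  val msgs = RegInit(0.U(16.W))
  when (a_done) { msgs := msgs + 1.U }
}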
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
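
// Hedged usage sketch (not from the original source): forming a Get request
// with an outward edge and driving it onto an A channel. Names are hypothetical.
class GetIssuer(edge: TLEdgeOut, params: TLBundleParameters) extends Module {
  val io = IO(new Bundle { val a = Decoupled(new TLBundleA(params)) })
  val (legal, get) = edge.Get(fromSource = 0.U, toAddress = 0x100.U, lgSize = 3.U)
  io.a.valid := legal          // only issue when some manager supports the access
  io.a.bits  := get
}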
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
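
// Hedged usage sketch (not from the original source): answering incoming Gets
// with AccessAckData on the D channel via an inward edge. Names are hypothetical
// and the zero data payload is a placeholder.
class GetResponder(edge: TLEdgeIn, params: TLBundleParameters) extends Module {
  val io = IO(new Bundle {
    val a = Flipped(Decoupled(new TLBundleA(params)))
    val d = Decoupled(new TLBundleD(params))
  })
  io.a.ready := io.d.ready
  io.d.valid := io.a.valid
  io.d.bits  := edge.AccessAck(io.a.bits, 0.U(params.dataBits.W))
}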
| module TLMonitor_24( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [8:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [27:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [8:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [8:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [27:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [8:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_30 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_32 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_46 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_48 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_52 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_54 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_58 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_60 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_64 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_66 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_73 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_75 = 1'h1; // @[Parameters.scala:57:20]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_first_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_first_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_first_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_first_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_set_wo_ready_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_set_wo_ready_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_opcodes_set_interm_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_opcodes_set_interm_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_sizes_set_interm_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_sizes_set_interm_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_opcodes_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_opcodes_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_sizes_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_sizes_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_probe_ack_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_probe_ack_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_probe_ack_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_probe_ack_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _same_cycle_resp_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _same_cycle_resp_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _same_cycle_resp_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _same_cycle_resp_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _same_cycle_resp_WIRE_4_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _same_cycle_resp_WIRE_5_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_first_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_first_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_first_WIRE_2_bits_source = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_first_WIRE_3_bits_source = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_set_wo_ready_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_set_wo_ready_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_set_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_set_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_opcodes_set_interm_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_opcodes_set_interm_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_sizes_set_interm_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_sizes_set_interm_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_opcodes_set_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_opcodes_set_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_sizes_set_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_sizes_set_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_probe_ack_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_probe_ack_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_probe_ack_WIRE_2_bits_source = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_probe_ack_WIRE_3_bits_source = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _same_cycle_resp_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _same_cycle_resp_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _same_cycle_resp_WIRE_2_bits_source = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _same_cycle_resp_WIRE_3_bits_source = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _same_cycle_resp_WIRE_4_bits_source = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _same_cycle_resp_WIRE_5_bits_source = 9'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [4098:0] _c_opcodes_set_T_1 = 4099'h0; // @[Monitor.scala:767:54]
wire [4098:0] _c_sizes_set_T_1 = 4099'h0; // @[Monitor.scala:768:52]
wire [11:0] _c_opcodes_set_T = 12'h0; // @[Monitor.scala:767:79]
wire [11:0] _c_sizes_set_T = 12'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [511:0] _c_set_wo_ready_T = 512'h1; // @[OneHot.scala:58:35]
wire [511:0] _c_set_T = 512'h1; // @[OneHot.scala:58:35]
wire [1027:0] c_opcodes_set = 1028'h0; // @[Monitor.scala:740:34]
wire [1027:0] c_sizes_set = 1028'h0; // @[Monitor.scala:741:34]
wire [256:0] c_set = 257'h0; // @[Monitor.scala:738:34]
wire [256:0] c_set_wo_ready = 257'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [8:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_44 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_48 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _uncommonBits_T_54 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _source_ok_uncommonBits_T_8 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [8:0] _source_ok_uncommonBits_T_9 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_T = io_in_a_bits_source_0 == 9'h90; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [6:0] _source_ok_T_1 = io_in_a_bits_source_0[8:2]; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_T_7 = io_in_a_bits_source_0[8:2]; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_T_13 = io_in_a_bits_source_0[8:2]; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_T_19 = io_in_a_bits_source_0[8:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_2 = _source_ok_T_1 == 7'h20; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_8 = _source_ok_T_7 == 7'h21; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_14 = _source_ok_T_13 == 7'h22; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_20 = _source_ok_T_19 == 7'h23; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31]
wire _source_ok_T_25 = io_in_a_bits_source_0 == 9'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_5 = _source_ok_T_25; // @[Parameters.scala:1138:31]
wire _source_ok_T_26 = io_in_a_bits_source_0 == 9'h41; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_6 = _source_ok_T_26; // @[Parameters.scala:1138:31]
wire _source_ok_T_27 = io_in_a_bits_source_0 == 9'h42; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_7 = _source_ok_T_27; // @[Parameters.scala:1138:31]
wire [5:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[5:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] _source_ok_T_28 = io_in_a_bits_source_0[8:6]; // @[Monitor.scala:36:7]
wire _source_ok_T_29 = _source_ok_T_28 == 3'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_31 = _source_ok_T_29; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_33 = _source_ok_T_31; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_8 = _source_ok_T_33; // @[Parameters.scala:1138:31]
wire _source_ok_T_34 = io_in_a_bits_source_0 == 9'h100; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_9 = _source_ok_T_34; // @[Parameters.scala:1138:31]
wire _source_ok_T_35 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_36 = _source_ok_T_35 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_37 = _source_ok_T_36 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_38 = _source_ok_T_37 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_39 = _source_ok_T_38 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_40 = _source_ok_T_39 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_41 = _source_ok_T_40 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_42 = _source_ok_T_41 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_42 | _source_ok_WIRE_9; // @[Parameters.scala:1138:31, :1139:46]
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [27:0] _is_aligned_T = {22'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 28'h0; // @[Edges.scala:21:{16,24}]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [5:0] uncommonBits_4 = _uncommonBits_T_4[5:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [5:0] uncommonBits_9 = _uncommonBits_T_9[5:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}]
wire [5:0] uncommonBits_14 = _uncommonBits_T_14[5:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}]
wire [5:0] uncommonBits_19 = _uncommonBits_T_19[5:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}]
wire [5:0] uncommonBits_24 = _uncommonBits_T_24[5:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}]
wire [5:0] uncommonBits_29 = _uncommonBits_T_29[5:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}]
wire [5:0] uncommonBits_34 = _uncommonBits_T_34[5:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}]
wire [5:0] uncommonBits_39 = _uncommonBits_T_39[5:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}]
wire [5:0] uncommonBits_44 = _uncommonBits_T_44[5:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_46 = _uncommonBits_T_46[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_47 = _uncommonBits_T_47[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_48 = _uncommonBits_T_48[1:0]; // @[Parameters.scala:52:{29,56}]
wire [5:0] uncommonBits_49 = _uncommonBits_T_49[5:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_52 = _uncommonBits_T_52[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_53 = _uncommonBits_T_53[1:0]; // @[Parameters.scala:52:{29,56}]
wire [5:0] uncommonBits_54 = _uncommonBits_T_54[5:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_43 = io_in_d_bits_source_0 == 9'h90; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_43; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [6:0] _source_ok_T_44 = io_in_d_bits_source_0[8:2]; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_T_50 = io_in_d_bits_source_0[8:2]; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_T_56 = io_in_d_bits_source_0[8:2]; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_T_62 = io_in_d_bits_source_0[8:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_45 = _source_ok_T_44 == 7'h20; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_47 = _source_ok_T_45; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_49 = _source_ok_T_47; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_49; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_51 = _source_ok_T_50 == 7'h21; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_53 = _source_ok_T_51; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_55 = _source_ok_T_53; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_55; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_57 = _source_ok_T_56 == 7'h22; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_59 = _source_ok_T_57; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_61 = _source_ok_T_59; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_61; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_63 = _source_ok_T_62 == 7'h23; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_65 = _source_ok_T_63; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_67 = _source_ok_T_65; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_4 = _source_ok_T_67; // @[Parameters.scala:1138:31]
wire _source_ok_T_68 = io_in_d_bits_source_0 == 9'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_5 = _source_ok_T_68; // @[Parameters.scala:1138:31]
wire _source_ok_T_69 = io_in_d_bits_source_0 == 9'h41; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_6 = _source_ok_T_69; // @[Parameters.scala:1138:31]
wire _source_ok_T_70 = io_in_d_bits_source_0 == 9'h42; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_7 = _source_ok_T_70; // @[Parameters.scala:1138:31]
wire [5:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[5:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] _source_ok_T_71 = io_in_d_bits_source_0[8:6]; // @[Monitor.scala:36:7]
wire _source_ok_T_72 = _source_ok_T_71 == 3'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_74 = _source_ok_T_72; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_76 = _source_ok_T_74; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_8 = _source_ok_T_76; // @[Parameters.scala:1138:31]
wire _source_ok_T_77 = io_in_d_bits_source_0 == 9'h100; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_9 = _source_ok_T_77; // @[Parameters.scala:1138:31]
wire _source_ok_T_78 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_79 = _source_ok_T_78 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_80 = _source_ok_T_79 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_81 = _source_ok_T_80 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_82 = _source_ok_T_81 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_83 = _source_ok_T_82 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_84 = _source_ok_T_83 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_85 = _source_ok_T_84 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_85 | _source_ok_WIRE_1_9; // @[Parameters.scala:1138:31, :1139:46]
wire _T_1187 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1187; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1187; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [8:0] source; // @[Monitor.scala:390:22]
reg [27:0] address; // @[Monitor.scala:391:22]
wire _T_1260 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1260; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1260; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1260; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [8:0] source_1; // @[Monitor.scala:541:22]
reg [256:0] inflight; // @[Monitor.scala:614:27]
reg [1027:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [1027:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [256:0] a_set; // @[Monitor.scala:626:34]
wire [256:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [1027:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [1027:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [11:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [11:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [11:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [11:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [11:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [11:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [11:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [11:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [11:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [1027:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [1027:0] _a_opcode_lookup_T_6 = {1024'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [1027:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[1027:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [1027:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [1027:0] _a_size_lookup_T_6 = {1024'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [1027:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[1027:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [511:0] _GEN_2 = 512'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [511:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [511:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[256:0] : 257'h0; // @[OneHot.scala:58:35]
wire _T_1113 = _T_1187 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_1113 ? _a_set_T[256:0] : 257'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_1113 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_1113 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [11:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [11:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [11:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [4098:0] _a_opcodes_set_T_1 = {4095'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_1113 ? _a_opcodes_set_T_1[1027:0] : 1028'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [4098:0] _a_sizes_set_T_1 = {4095'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_1113 ? _a_sizes_set_T_1[1027:0] : 1028'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [256:0] d_clr; // @[Monitor.scala:664:34]
wire [256:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [1027:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [1027:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_1159 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [511:0] _GEN_5 = 512'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [511:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [511:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [511:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [511:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1159 & ~d_release_ack ? _d_clr_wo_ready_T[256:0] : 257'h0; // @[OneHot.scala:58:35]
wire _T_1128 = _T_1260 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_1128 ? _d_clr_T[256:0] : 257'h0; // @[OneHot.scala:58:35]
wire [4110:0] _d_opcodes_clr_T_5 = 4111'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_1128 ? _d_opcodes_clr_T_5[1027:0] : 1028'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [4110:0] _d_sizes_clr_T_5 = 4111'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_1128 ? _d_sizes_clr_T_5[1027:0] : 1028'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [256:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [256:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [256:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [1027:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [1027:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [1027:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [1027:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [1027:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [1027:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [256:0] inflight_1; // @[Monitor.scala:726:35]
wire [256:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [1027:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [1027:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [1027:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [1027:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [1027:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [1027:0] _c_opcode_lookup_T_6 = {1024'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [1027:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[1027:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [1027:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [1027:0] _c_size_lookup_T_6 = {1024'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [1027:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[1027:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [256:0] d_clr_1; // @[Monitor.scala:774:34]
wire [256:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [1027:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [1027:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1231 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1231 & d_release_ack_1 ? _d_clr_wo_ready_T_1[256:0] : 257'h0; // @[OneHot.scala:58:35]
wire _T_1213 = _T_1260 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1213 ? _d_clr_T_1[256:0] : 257'h0; // @[OneHot.scala:58:35]
wire [4110:0] _d_opcodes_clr_T_11 = 4111'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1213 ? _d_opcodes_clr_T_11[1027:0] : 1028'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [4110:0] _d_sizes_clr_T_11 = 4111'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1213 ? _d_sizes_clr_T_11[1027:0] : 1028'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 9'h0; // @[Monitor.scala:36:7, :795:113]
wire [256:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [256:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [1027:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [1027:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [1027:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [1027:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File MulAddRecFN.scala:
/*============================================================================

This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).

Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice,
   this list of conditions, and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions, and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

3. Neither the name of the University nor the names of its contributors may
   be used to endorse or promote products derived from this software without
   specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

=============================================================================*/

package hardfloat

import chisel3._
import chisel3.util._
import consts._

//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
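// This bundle carries the operand classification (NaN/Inf/zero flags), the shared
// exponent sum, and the alignment of the addend C from the pre-multiply stage to
// the post-multiply stage, so the wide significand multiply-add can be performed
// between the two modules (see MulAddRecFN below).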
class MulAddRecFN_interIo(expWidth: Int, sigWidth: Int) extends Bundle
{
//*** ENCODE SOME OF THESE CASES IN FEWER BITS?:
val isSigNaNAny = Bool()
val isNaNAOrB = Bool()
val isInfA = Bool()
val isZeroA = Bool()
val isInfB = Bool()
val isZeroB = Bool()
val signProd = Bool()
val isNaNC = Bool()
val isInfC = Bool()
val isZeroC = Bool()
val sExpSum = SInt((expWidth + 2).W)
val doSubMags = Bool()
val CIsDominant = Bool()
val CDom_CAlignDist = UInt(log2Ceil(sigWidth + 1).W)
val highAlignedSigC = UInt((sigWidth + 2).W)
val bit0AlignedSigC = UInt(1.W)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_preMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_preMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val mulAddA = Output(UInt(sigWidth.W))
val mulAddB = Output(UInt(sigWidth.W))
val mulAddC = Output(UInt((sigWidth * 2).W))
val toPostMul = Output(new MulAddRecFN_interIo(expWidth, sigWidth))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
//*** POSSIBLE TO REDUCE THIS BY 1 OR 2 BITS? (CURRENTLY 2 BITS BETWEEN
//*** UNSHIFTED C AND PRODUCT):
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawA = rawFloatFromRecFN(expWidth, sigWidth, io.a)
val rawB = rawFloatFromRecFN(expWidth, sigWidth, io.b)
val rawC = rawFloatFromRecFN(expWidth, sigWidth, io.c)
val signProd = rawA.sign ^ rawB.sign ^ io.op(1)
//*** REVIEW THE BIAS FOR 'sExpAlignedProd':
val sExpAlignedProd =
rawA.sExp +& rawB.sExp + (-(BigInt(1)<<expWidth) + sigWidth + 3).S
val doSubMags = signProd ^ rawC.sign ^ io.op(0)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sNatCAlignDist = sExpAlignedProd - rawC.sExp
val posNatCAlignDist = sNatCAlignDist(expWidth + 1, 0)
val isMinCAlign = rawA.isZero || rawB.isZero || (sNatCAlignDist < 0.S)
val CIsDominant =
! rawC.isZero && (isMinCAlign || (posNatCAlignDist <= sigWidth.U))
val CAlignDist =
Mux(isMinCAlign,
0.U,
Mux(posNatCAlignDist < (sigSumWidth - 1).U,
posNatCAlignDist(log2Ceil(sigSumWidth) - 1, 0),
(sigSumWidth - 1).U
)
)
val mainAlignedSigC =
(Mux(doSubMags, ~rawC.sig, rawC.sig) ## Fill(sigSumWidth - sigWidth + 2, doSubMags)).asSInt>>CAlignDist
val reduced4CExtra =
(orReduceBy4(rawC.sig<<((sigSumWidth - sigWidth - 1) & 3)) &
lowMask(
CAlignDist>>2,
//*** NOT NEEDED?:
// (sigSumWidth + 2)>>2,
(sigSumWidth - 1)>>2,
(sigSumWidth - sigWidth - 1)>>2
)
).orR
val alignedSigC =
Cat(mainAlignedSigC>>3,
Mux(doSubMags,
mainAlignedSigC(2, 0).andR && ! reduced4CExtra,
mainAlignedSigC(2, 0).orR || reduced4CExtra
)
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
io.mulAddA := rawA.sig
io.mulAddB := rawB.sig
io.mulAddC := alignedSigC(sigWidth * 2, 1)
io.toPostMul.isSigNaNAny :=
isSigNaNRawFloat(rawA) || isSigNaNRawFloat(rawB) ||
isSigNaNRawFloat(rawC)
io.toPostMul.isNaNAOrB := rawA.isNaN || rawB.isNaN
io.toPostMul.isInfA := rawA.isInf
io.toPostMul.isZeroA := rawA.isZero
io.toPostMul.isInfB := rawB.isInf
io.toPostMul.isZeroB := rawB.isZero
io.toPostMul.signProd := signProd
io.toPostMul.isNaNC := rawC.isNaN
io.toPostMul.isInfC := rawC.isInf
io.toPostMul.isZeroC := rawC.isZero
io.toPostMul.sExpSum :=
Mux(CIsDominant, rawC.sExp, sExpAlignedProd - sigWidth.S)
io.toPostMul.doSubMags := doSubMags
io.toPostMul.CIsDominant := CIsDominant
io.toPostMul.CDom_CAlignDist := CAlignDist(log2Ceil(sigWidth + 1) - 1, 0)
io.toPostMul.highAlignedSigC :=
alignedSigC(sigSumWidth - 1, sigWidth * 2 + 1)
io.toPostMul.bit0AlignedSigC := alignedSigC(0)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_postMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_postMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val fromPreMul = Input(new MulAddRecFN_interIo(expWidth, sigWidth))
val mulAddResult = Input(UInt((sigWidth * 2 + 1).W))
val roundingMode = Input(UInt(3.W))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_min = (io.roundingMode === round_min)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val opSignC = io.fromPreMul.signProd ^ io.fromPreMul.doSubMags
val sigSum =
Cat(Mux(io.mulAddResult(sigWidth * 2),
io.fromPreMul.highAlignedSigC + 1.U,
io.fromPreMul.highAlignedSigC
),
io.mulAddResult(sigWidth * 2 - 1, 0),
io.fromPreMul.bit0AlignedSigC
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val CDom_sign = opSignC
val CDom_sExp = io.fromPreMul.sExpSum - io.fromPreMul.doSubMags.zext
val CDom_absSigSum =
Mux(io.fromPreMul.doSubMags,
~sigSum(sigSumWidth - 1, sigWidth + 1),
0.U(1.W) ##
//*** IF GAP IS REDUCED TO 1 BIT, MUST REDUCE THIS COMPONENT TO 1 BIT TOO:
io.fromPreMul.highAlignedSigC(sigWidth + 1, sigWidth) ##
sigSum(sigSumWidth - 3, sigWidth + 2)
)
val CDom_absSigSumExtra =
Mux(io.fromPreMul.doSubMags,
(~sigSum(sigWidth, 1)).orR,
sigSum(sigWidth + 1, 1).orR
)
val CDom_mainSig =
(CDom_absSigSum<<io.fromPreMul.CDom_CAlignDist)(
sigWidth * 2 + 1, sigWidth - 3)
val CDom_reduced4SigExtra =
(orReduceBy4(CDom_absSigSum(sigWidth - 1, 0)<<(~sigWidth & 3)) &
lowMask(io.fromPreMul.CDom_CAlignDist>>2, 0, sigWidth>>2)).orR
val CDom_sig =
Cat(CDom_mainSig>>3,
CDom_mainSig(2, 0).orR || CDom_reduced4SigExtra ||
CDom_absSigSumExtra
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notCDom_signSigSum = sigSum(sigWidth * 2 + 3)
val notCDom_absSigSum =
Mux(notCDom_signSigSum,
~sigSum(sigWidth * 2 + 2, 0),
sigSum(sigWidth * 2 + 2, 0) + io.fromPreMul.doSubMags
)
val notCDom_reduced2AbsSigSum = orReduceBy2(notCDom_absSigSum)
val notCDom_normDistReduced2 = countLeadingZeros(notCDom_reduced2AbsSigSum)
val notCDom_nearNormDist = notCDom_normDistReduced2<<1
val notCDom_sExp = io.fromPreMul.sExpSum - notCDom_nearNormDist.asUInt.zext
val notCDom_mainSig =
(notCDom_absSigSum<<notCDom_nearNormDist)(
sigWidth * 2 + 3, sigWidth - 1)
val notCDom_reduced4SigExtra =
(orReduceBy2(
notCDom_reduced2AbsSigSum(sigWidth>>1, 0)<<((sigWidth>>1) & 1)) &
lowMask(notCDom_normDistReduced2>>1, 0, (sigWidth + 2)>>2)
).orR
val notCDom_sig =
Cat(notCDom_mainSig>>3,
notCDom_mainSig(2, 0).orR || notCDom_reduced4SigExtra
)
val notCDom_completeCancellation =
(notCDom_sig(sigWidth + 2, sigWidth + 1) === 0.U)
val notCDom_sign =
Mux(notCDom_completeCancellation,
roundingMode_min,
io.fromPreMul.signProd ^ notCDom_signSigSum
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notNaN_isInfProd = io.fromPreMul.isInfA || io.fromPreMul.isInfB
val notNaN_isInfOut = notNaN_isInfProd || io.fromPreMul.isInfC
val notNaN_addZeros =
(io.fromPreMul.isZeroA || io.fromPreMul.isZeroB) &&
io.fromPreMul.isZeroC
io.invalidExc :=
io.fromPreMul.isSigNaNAny ||
(io.fromPreMul.isInfA && io.fromPreMul.isZeroB) ||
(io.fromPreMul.isZeroA && io.fromPreMul.isInfB) ||
(! io.fromPreMul.isNaNAOrB &&
(io.fromPreMul.isInfA || io.fromPreMul.isInfB) &&
io.fromPreMul.isInfC &&
io.fromPreMul.doSubMags)
io.rawOut.isNaN := io.fromPreMul.isNaNAOrB || io.fromPreMul.isNaNC
io.rawOut.isInf := notNaN_isInfOut
//*** IMPROVE?:
io.rawOut.isZero :=
notNaN_addZeros ||
(! io.fromPreMul.CIsDominant && notCDom_completeCancellation)
io.rawOut.sign :=
(notNaN_isInfProd && io.fromPreMul.signProd) ||
(io.fromPreMul.isInfC && opSignC) ||
(notNaN_addZeros && ! roundingMode_min &&
io.fromPreMul.signProd && opSignC) ||
(notNaN_addZeros && roundingMode_min &&
(io.fromPreMul.signProd || opSignC)) ||
(! notNaN_isInfOut && ! notNaN_addZeros &&
Mux(io.fromPreMul.CIsDominant, CDom_sign, notCDom_sign))
io.rawOut.sExp := Mux(io.fromPreMul.CIsDominant, CDom_sExp, notCDom_sExp)
io.rawOut.sig := Mux(io.fromPreMul.CIsDominant, CDom_sig, notCDom_sig)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulAddRecFNToRaw_preMul =
Module(new MulAddRecFNToRaw_preMul(expWidth, sigWidth))
val mulAddRecFNToRaw_postMul =
Module(new MulAddRecFNToRaw_postMul(expWidth, sigWidth))
mulAddRecFNToRaw_preMul.io.op := io.op
mulAddRecFNToRaw_preMul.io.a := io.a
mulAddRecFNToRaw_preMul.io.b := io.b
mulAddRecFNToRaw_preMul.io.c := io.c
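    // The raw significand multiply-add is computed here, at the top level, from the
    // operands exposed by the pre-multiply stage; keeping it outside the helper
    // modules makes it straightforward to implement or share the multiplier array
    // separately from the pre-/post-mul logic.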
val mulAddResult =
(mulAddRecFNToRaw_preMul.io.mulAddA *
mulAddRecFNToRaw_preMul.io.mulAddB) +&
mulAddRecFNToRaw_preMul.io.mulAddC
mulAddRecFNToRaw_postMul.io.fromPreMul :=
mulAddRecFNToRaw_preMul.io.toPostMul
mulAddRecFNToRaw_postMul.io.mulAddResult := mulAddResult
mulAddRecFNToRaw_postMul.io.roundingMode := io.roundingMode
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN =
Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
roundRawFNToRecFN.io.invalidExc := mulAddRecFNToRaw_postMul.io.invalidExc
roundRawFNToRecFN.io.infiniteExc := false.B
roundRawFNToRecFN.io.in := mulAddRecFNToRaw_postMul.io.rawOut
roundRawFNToRecFN.io.roundingMode := io.roundingMode
roundRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
| module MulAddRecFN_e8_s24_17( // @[MulAddRecFN.scala:300:7]
input [32:0] io_a, // @[MulAddRecFN.scala:303:16]
output [32:0] io_out // @[MulAddRecFN.scala:303:16]
);
wire _mulAddRecFNToRaw_postMul_io_invalidExc; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_isNaN; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_isInf; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_isZero; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_sign; // @[MulAddRecFN.scala:319:15]
wire [9:0] _mulAddRecFNToRaw_postMul_io_rawOut_sExp; // @[MulAddRecFN.scala:319:15]
wire [26:0] _mulAddRecFNToRaw_postMul_io_rawOut_sig; // @[MulAddRecFN.scala:319:15]
wire [23:0] _mulAddRecFNToRaw_preMul_io_mulAddA; // @[MulAddRecFN.scala:317:15]
wire [47:0] _mulAddRecFNToRaw_preMul_io_mulAddC; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isInfA; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_signProd; // @[MulAddRecFN.scala:317:15]
wire [9:0] _mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags; // @[MulAddRecFN.scala:317:15]
wire [4:0] _mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist; // @[MulAddRecFN.scala:317:15]
wire [25:0] _mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC; // @[MulAddRecFN.scala:317:15]
wire [32:0] io_a_0 = io_a; // @[MulAddRecFN.scala:300:7]
wire io_detectTininess = 1'h1; // @[MulAddRecFN.scala:300:7, :303:16, :317:15, :319:15, :339:15]
wire [2:0] io_roundingMode = 3'h0; // @[MulAddRecFN.scala:300:7, :303:16, :319:15, :339:15]
wire [32:0] io_c = 33'h15800000; // @[MulAddRecFN.scala:300:7, :303:16, :317:15]
wire [32:0] io_b = 33'h80000000; // @[MulAddRecFN.scala:300:7, :303:16, :317:15]
wire [1:0] io_op = 2'h0; // @[MulAddRecFN.scala:300:7, :303:16, :317:15]
wire [32:0] io_out_0; // @[MulAddRecFN.scala:300:7]
wire [4:0] io_exceptionFlags; // @[MulAddRecFN.scala:300:7]
wire [47:0] _mulAddResult_T = {1'h0, _mulAddRecFNToRaw_preMul_io_mulAddA, 23'h0}; // @[MulAddRecFN.scala:317:15, :327:45]
wire [48:0] mulAddResult = {1'h0, _mulAddResult_T} + {1'h0, _mulAddRecFNToRaw_preMul_io_mulAddC}; // @[MulAddRecFN.scala:317:15, :327:45, :328:50]
MulAddRecFNToRaw_preMul_e8_s24_17 mulAddRecFNToRaw_preMul ( // @[MulAddRecFN.scala:317:15]
.io_a (io_a_0), // @[MulAddRecFN.scala:300:7]
.io_mulAddA (_mulAddRecFNToRaw_preMul_io_mulAddA),
.io_mulAddC (_mulAddRecFNToRaw_preMul_io_mulAddC),
.io_toPostMul_isSigNaNAny (_mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny),
.io_toPostMul_isNaNAOrB (_mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB),
.io_toPostMul_isInfA (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfA),
.io_toPostMul_isZeroA (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA),
.io_toPostMul_signProd (_mulAddRecFNToRaw_preMul_io_toPostMul_signProd),
.io_toPostMul_sExpSum (_mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum),
.io_toPostMul_doSubMags (_mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags),
.io_toPostMul_CDom_CAlignDist (_mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist),
.io_toPostMul_highAlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC),
.io_toPostMul_bit0AlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC)
); // @[MulAddRecFN.scala:317:15]
MulAddRecFNToRaw_postMul_e8_s24_17 mulAddRecFNToRaw_postMul ( // @[MulAddRecFN.scala:319:15]
.io_fromPreMul_isSigNaNAny (_mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isNaNAOrB (_mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isInfA (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfA), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isZeroA (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_signProd (_mulAddRecFNToRaw_preMul_io_toPostMul_signProd), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_sExpSum (_mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_doSubMags (_mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_CDom_CAlignDist (_mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_highAlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_bit0AlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC), // @[MulAddRecFN.scala:317:15]
.io_mulAddResult (mulAddResult), // @[MulAddRecFN.scala:328:50]
.io_invalidExc (_mulAddRecFNToRaw_postMul_io_invalidExc),
.io_rawOut_isNaN (_mulAddRecFNToRaw_postMul_io_rawOut_isNaN),
.io_rawOut_isInf (_mulAddRecFNToRaw_postMul_io_rawOut_isInf),
.io_rawOut_isZero (_mulAddRecFNToRaw_postMul_io_rawOut_isZero),
.io_rawOut_sign (_mulAddRecFNToRaw_postMul_io_rawOut_sign),
.io_rawOut_sExp (_mulAddRecFNToRaw_postMul_io_rawOut_sExp),
.io_rawOut_sig (_mulAddRecFNToRaw_postMul_io_rawOut_sig)
); // @[MulAddRecFN.scala:319:15]
RoundRawFNToRecFN_e8_s24_29 roundRawFNToRecFN ( // @[MulAddRecFN.scala:339:15]
.io_invalidExc (_mulAddRecFNToRaw_postMul_io_invalidExc), // @[MulAddRecFN.scala:319:15]
.io_in_isNaN (_mulAddRecFNToRaw_postMul_io_rawOut_isNaN), // @[MulAddRecFN.scala:319:15]
.io_in_isInf (_mulAddRecFNToRaw_postMul_io_rawOut_isInf), // @[MulAddRecFN.scala:319:15]
.io_in_isZero (_mulAddRecFNToRaw_postMul_io_rawOut_isZero), // @[MulAddRecFN.scala:319:15]
.io_in_sign (_mulAddRecFNToRaw_postMul_io_rawOut_sign), // @[MulAddRecFN.scala:319:15]
.io_in_sExp (_mulAddRecFNToRaw_postMul_io_rawOut_sExp), // @[MulAddRecFN.scala:319:15]
.io_in_sig (_mulAddRecFNToRaw_postMul_io_rawOut_sig), // @[MulAddRecFN.scala:319:15]
.io_out (io_out_0),
.io_exceptionFlags (io_exceptionFlags)
); // @[MulAddRecFN.scala:339:15]
assign io_out = io_out_0; // @[MulAddRecFN.scala:300:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
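// A minimal usage sketch (not part of the original file; the signal and stage
// names below are hypothetical): delay a valid bit by three cycles, resetting
// each stage to false and suggesting names valid_pipe_0 .. valid_pipe_2.
object ShiftRegInitExample {
  def delayedValid(valid: Bool): Bool =
    ShiftRegInit(valid, n = 3, init = false.B, name = Some("valid_pipe"))
}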
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
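// A minimal usage sketch (not part of the original file; names are hypothetical):
// pipe an 8-bit status word through a two-deep, asynchronously reset delay line.
object AsyncResetShiftRegExample {
  def delayedStatus(status: UInt): UInt =
    AsyncResetShiftReg(status, depth = 2, init = 0, name = Some("status_pipe"))
}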
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
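// A minimal usage sketch (not part of the original file; names are hypothetical):
// synchronize a single asynchronous input bit into the local clock domain through
// a three-deep chain of asynchronously reset flops.
object AsyncResetSynchronizerShiftRegExample {
  def synchronize(asyncBit: Bool): Bool =
    AsyncResetSynchronizerShiftReg(asyncBit, sync = 3, init = 0, name = Some("cdc_sync"))
}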
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unnecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
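// A minimal usage sketch (not part of the original file; names are hypothetical):
// capture a multi-bit bus into the local clock domain through a single enabled
// crossing register, initialized to zero.
object ClockCrossingRegExample {
  def capture(bus: UInt, fire: Bool): UInt =
    ClockCrossingReg(bus, en = fire, doInit = true, name = Some("bus_cdc"))
}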
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_121( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
output io_q // @[ShiftReg.scala:36:14]
);
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire io_d = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41]
wire _output_T_1 = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_201 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Tile.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
import Util._
/**
* A Tile is a purely combinational 2D array of passThrough PEs.
 * a, b, d, and the control signals are broadcast across the entire array and are passed through to the Tile's outputs
 * @param inputType The data type of each PE's 'a' input
 * @param outputType The data type of each PE's 'b'/'d' inputs and of its outputs
 * @param accType The data type used for accumulation within each PE
 * @param rows Number of rows of PEs in the Tile
 * @param columns Number of columns of PEs in the Tile
*/
class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module {
val io = IO(new Bundle {
val in_a = Input(Vec(rows, inputType))
val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it
val in_d = Input(Vec(columns, outputType))
val in_control = Input(Vec(columns, new PEControl(accType)))
val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val in_last = Input(Vec(columns, Bool()))
val out_a = Output(Vec(rows, inputType))
val out_c = Output(Vec(columns, outputType))
val out_b = Output(Vec(columns, outputType))
val out_control = Output(Vec(columns, new PEControl(accType)))
val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val out_last = Output(Vec(columns, Bool()))
val in_valid = Input(Vec(columns, Bool()))
val out_valid = Output(Vec(columns, Bool()))
val bad_dataflow = Output(Bool())
})
import ev._
val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls)))
val tileT = tile.transpose
// TODO: abstract hori/vert broadcast, all these connections look the same
// Broadcast 'a' horizontally across the Tile
for (r <- 0 until rows) {
tile(r).foldLeft(io.in_a(r)) {
case (in_a, pe) =>
pe.io.in_a := in_a
pe.io.out_a
}
}
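  // The foldLeft above daisy-chains the PEs in a row: io.in_a(r) drives the
  // leftmost PE, and each PE's out_a becomes the in_a of the PE to its right.
  // The same pattern is reused for the vertical broadcasts below.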
// Broadcast 'b' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_b(c)) {
case (in_b, pe) =>
pe.io.in_b := (if (tree_reduction) in_b.zero else in_b)
pe.io.out_b
}
}
// Broadcast 'd' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_d(c)) {
case (in_d, pe) =>
pe.io.in_d := in_d
pe.io.out_c
}
}
// Broadcast 'control' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_control(c)) {
case (in_ctrl, pe) =>
pe.io.in_control := in_ctrl
pe.io.out_control
}
}
  // Broadcast 'valid' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_valid(c)) {
case (v, pe) =>
pe.io.in_valid := v
pe.io.out_valid
}
}
// Broadcast 'id' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_id(c)) {
case (id, pe) =>
pe.io.in_id := id
pe.io.out_id
}
}
// Broadcast 'last' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_last(c)) {
case (last, pe) =>
pe.io.in_last := last
pe.io.out_last
}
}
// Drive the Tile's bottom IO
for (c <- 0 until columns) {
io.out_c(c) := tile(rows-1)(c).io.out_c
io.out_control(c) := tile(rows-1)(c).io.out_control
io.out_id(c) := tile(rows-1)(c).io.out_id
io.out_last(c) := tile(rows-1)(c).io.out_last
io.out_valid(c) := tile(rows-1)(c).io.out_valid
io.out_b(c) := {
if (tree_reduction) {
val prods = tileT(c).map(_.io.out_b)
accumulateTree(prods :+ io.in_b(c))
} else {
tile(rows - 1)(c).io.out_b
}
}
}
io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_)
// Drive the Tile's right IO
for (r <- 0 until rows) {
io.out_a(r) := tile(r)(columns-1).io.out_a
}
}
| module Tile_246( // @[Tile.scala:16:7]
input clock, // @[Tile.scala:16:7]
input reset, // @[Tile.scala:16:7]
input [7:0] io_in_a_0, // @[Tile.scala:17:14]
input [19:0] io_in_b_0, // @[Tile.scala:17:14]
input [19:0] io_in_d_0, // @[Tile.scala:17:14]
input io_in_control_0_dataflow, // @[Tile.scala:17:14]
input io_in_control_0_propagate, // @[Tile.scala:17:14]
input [4:0] io_in_control_0_shift, // @[Tile.scala:17:14]
input [2:0] io_in_id_0, // @[Tile.scala:17:14]
input io_in_last_0, // @[Tile.scala:17:14]
output [7:0] io_out_a_0, // @[Tile.scala:17:14]
output [19:0] io_out_c_0, // @[Tile.scala:17:14]
output [19:0] io_out_b_0, // @[Tile.scala:17:14]
output io_out_control_0_dataflow, // @[Tile.scala:17:14]
output io_out_control_0_propagate, // @[Tile.scala:17:14]
output [4:0] io_out_control_0_shift, // @[Tile.scala:17:14]
output [2:0] io_out_id_0, // @[Tile.scala:17:14]
output io_out_last_0, // @[Tile.scala:17:14]
input io_in_valid_0, // @[Tile.scala:17:14]
output io_out_valid_0, // @[Tile.scala:17:14]
output io_bad_dataflow // @[Tile.scala:17:14]
);
wire [7:0] io_in_a_0_0 = io_in_a_0; // @[Tile.scala:16:7]
wire [19:0] io_in_b_0_0 = io_in_b_0; // @[Tile.scala:16:7]
wire [19:0] io_in_d_0_0 = io_in_d_0; // @[Tile.scala:16:7]
wire io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7]
wire io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7]
wire [4:0] io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7]
wire [2:0] io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7]
wire io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7]
wire io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7]
wire [7:0] io_out_a_0_0; // @[Tile.scala:16:7]
wire [19:0] io_out_c_0_0; // @[Tile.scala:16:7]
wire [19:0] io_out_b_0_0; // @[Tile.scala:16:7]
wire io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
wire io_out_control_0_propagate_0; // @[Tile.scala:16:7]
wire [4:0] io_out_control_0_shift_0; // @[Tile.scala:16:7]
wire [2:0] io_out_id_0_0; // @[Tile.scala:16:7]
wire io_out_last_0_0; // @[Tile.scala:16:7]
wire io_out_valid_0_0; // @[Tile.scala:16:7]
wire io_bad_dataflow_0; // @[Tile.scala:16:7]
PE_502 tile_0_0 ( // @[Tile.scala:42:44]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0_0), // @[Tile.scala:16:7]
.io_in_b (io_in_b_0_0), // @[Tile.scala:16:7]
.io_in_d (io_in_d_0_0), // @[Tile.scala:16:7]
.io_out_a (io_out_a_0_0),
.io_out_b (io_out_b_0_0),
.io_out_c (io_out_c_0_0),
.io_in_control_dataflow (io_in_control_0_dataflow_0), // @[Tile.scala:16:7]
.io_in_control_propagate (io_in_control_0_propagate_0), // @[Tile.scala:16:7]
.io_in_control_shift (io_in_control_0_shift_0), // @[Tile.scala:16:7]
.io_out_control_dataflow (io_out_control_0_dataflow_0),
.io_out_control_propagate (io_out_control_0_propagate_0),
.io_out_control_shift (io_out_control_0_shift_0),
.io_in_id (io_in_id_0_0), // @[Tile.scala:16:7]
.io_out_id (io_out_id_0_0),
.io_in_last (io_in_last_0_0), // @[Tile.scala:16:7]
.io_out_last (io_out_last_0_0),
.io_in_valid (io_in_valid_0_0), // @[Tile.scala:16:7]
.io_out_valid (io_out_valid_0_0),
.io_bad_dataflow (io_bad_dataflow_0)
); // @[Tile.scala:42:44]
assign io_out_a_0 = io_out_a_0_0; // @[Tile.scala:16:7]
assign io_out_c_0 = io_out_c_0_0; // @[Tile.scala:16:7]
assign io_out_b_0 = io_out_b_0_0; // @[Tile.scala:16:7]
assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7]
assign io_out_control_0_shift = io_out_control_0_shift_0; // @[Tile.scala:16:7]
assign io_out_id_0 = io_out_id_0_0; // @[Tile.scala:16:7]
assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7]
assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7]
assign io_bad_dataflow = io_bad_dataflow_0; // @[Tile.scala:16:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
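  // An address/source pair is "visible" if every client that could own the given
  // source ID declares the address inside at least one of its visibility regions.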
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
    monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
      monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
  // This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val a_opcode_bus_size = 3 + 1 //opcode width is 3; add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
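    // Worked example of the packing scheme below (illustrative only; sizeBits = 4 assumed):
    // then a_opcode_bus_size = 4 and a_size_bus_size = 5, so every source ID owns a 4-bit slot in
    // inflight_opcodes and an 8-bit slot in inflight_sizes. A Get (opcode 4) from source 2 stores
    // (4 << 1) | 1 = 0b1001 at bit offset 2 << 2 = 8; the low 1 marks the slot as occupied. The
    // matching 'D' beat reads bits [11:8], masks with size_to_numfullbits(4) = 0b1111, and shifts
    // right by one to recover opcode 4 for the response-opcode check.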
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val c_opcode_bus_size = 3 + 1 //opcode width is 3; add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
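// Illustrative usage sketch (hypothetical signals, not part of this file): gather every
// ready/valid term once, then derive each handshake by excluding that side's own term, which
// avoids accidentally making a ready depend on its own valid.
//   val helper = DecoupledHelper(in.valid, out.ready, bufferFree)
//   out.valid := helper.fire(out.ready)  // in.valid && bufferFree
//   in.ready  := helper.fire(in.valid)   // out.ready && bufferFree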
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
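// Illustrative usage sketch (hypothetical key and values): returns the tuple whose key matches,
// falling back to the default when none does.
//   val (len, isStore) = MuxTLookup(opcode, (0.U, false.B),
//     Seq(0.U -> (4.U, false.B), 1.U -> (8.U, true.B)))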
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
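// Worked example (illustrative): Str("ok") elaborates to 0x6f6b.U(16.W), and Str(x, 16) renders
// a hardware UInt as ASCII hex digits, which is handy inside printf format strings.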
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
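// Worked example (illustrative): Majority("b101".U) is true.B (two of the three bits are set),
// while Majority("b100".U) is false.B.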
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
    * initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
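  // Illustrative usage sketch (hypothetical counter and plusarg name): bounds a cycle counter
  // from the simulator command line; running with +max_core_cycles=100000 kills the simulation
  // at that count, while the default of 0 leaves the check disabled.
  //   PlusArg.timeout("max_core_cycles", docstring = "Kill the sim after this many cycles")(cycles)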
}
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
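    // Worked example (illustrative): with n = 6, 4.U.addWrap(3.U, 6) yields 1.U and
    // 1.U.subWrap(3.U, 6) yields 4.U; for power-of-two n the wrap reduces to plain truncation.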
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
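  // Worked example (illustrative): UIntToOH1(2.U, 4) = b0011 (ones strictly below bit 2) and
  // OH1ToOH(b0011) = b0100, i.e. OH1 is a "one-hot minus one" thermometer form that OH1ToOH and
  // OH1ToUInt convert back.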
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
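  // Worked examples (illustrative): leftOR("b0010".U) = b1110 (ones at and above the lowest set
  // bit), rightOR("b0100".U) = b0111 (ones at and below the highest set bit).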
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
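  // Illustrative usage sketch (hypothetical data): unlike Seq.groupBy, keys come out in
  // first-appearance order, so downstream hardware generation is deterministic:
  //   groupByIntoSeq(Seq("ab", "cd", "ax"))(_.head)  // Seq('a' -> Seq("ab", "ax"), 'c' -> Seq("cd"))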
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
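  // Example (illustrative): adResponse(Get) selects AccessAckData and adResponse(AcquireBlock)
  // selects Grant, i.e. indexing adResponse/bcResponse by an A/B opcode gives the expected
  // D/C response opcode.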
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
 * (B)ranch: the agent is on an outwards path from the Trunk and holds read-only permissions.
 * (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
  // Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
  def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
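// Illustrative sketch (editor's addition, not part of the original file): the
// half-open [start, end) convention above means IdRange(4, 8) covers ids 4..7.
// The object name IdRangeExample is hypothetical and exists only for illustration.
object IdRangeExample {
  private val r = IdRange(4, 8)
  require(r.contains(7) && !r.contains(8)) // end is excluded
  require(r.size == 4 && !r.isEmpty)
  require(r.overlaps(IdRange(7, 12)) && !r.overlaps(IdRange(8, 12))) // [8, 12) is disjoint
}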
// A potentially empty inclusive range of power-of-two sizes [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
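// Illustrative sketch (editor's addition, not part of the original file):
// TransferSizes is an inclusive range of power-of-two sizes, so (4, 16) accepts
// 4-, 8- and 16-byte transfers but rejects 2, 32 and any non-power-of-two size.
// The object name TransferSizesExample is hypothetical.
object TransferSizesExample {
  private val t = TransferSizes(4, 16)
  require(t.contains(8) && !t.contains(32) && !t.contains(12)) // 12 is not a power of two
  require(t.intersect(TransferSizes(8, 64)) == TransferSizes(8, 16))
  require(t.mincover(TransferSizes(1)) == TransferSizes(1, 16)) // now also contains 2, which neither term had
}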
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
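// Illustrative sketch (editor's addition, not part of the original file): working
// through the base/mask examples in the comment above the case class. The object
// name AddressSetExamples is hypothetical.
object AddressSetExamples {
  private val dev = AddressSet(0x1000, 0xf0f) // 0x1000-0x100f, 0x1100-0x110f, ..., 0x1f00-0x1f0f
  require(dev.contains(BigInt(0x1103)) && !dev.contains(BigInt(0x1010)))
  require(dev.intersect(AddressSet(0x1100, 0xff)) == Some(AddressSet(0x1100, 0x0f)))
  require(dev.alignment == 0x10 && !dev.contiguous) // aligned to 16 bytes, but not one contiguous range
}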
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
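// Illustrative note (editor's addition, not part of the original file): applying a
// BufferParams to a DecoupledIO wraps it in a Queue of the given depth, e.g. inside
// some hypothetical module with a Decoupled port io.enq:
//   val buffered      = BufferParams.default(io.enq) // Queue(io.enq, 2): one cycle of latency
//   val combinational = BufferParams.flow(io.enq)    // depth-1 flow-through Queue: zero latency
//   val untouched     = BufferParams.none(io.enq)    // depth 0: returns io.enq unchanged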
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
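// Illustrative note (editor's addition): staticHasData lets hasData() below fold to a
// constant. For example, on an edge whose managers only support Get (no Puts or AMOs),
// aDataYes is false, so hasData(a) elaborates to false.B and the A-channel beat
// tracking in numBeats/firstlastHelper reduces to the single-beat case.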
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
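// Illustrative trace (editor's addition, not part of the original file): for an
// 8-byte-wide edge (beatBytes = 8), a 32-byte PutFullData has numBeats1 = 3, so the
// counter steps 0 -> 3 -> 2 -> 1 across the four accepted beats; 'first' is asserted
// on the first beat, 'last'/'done' on the fourth, and 'count' reads 0, 1, 2, 3.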
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
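// Illustrative note (editor's addition): concretely, Get and AcquireBlock(NtoB) report
// needT = false, while Puts, AMOs, AcquireBlock/AcquirePerm with NtoT or BtoT, and
// Hint(PREFETCH_WRITE) report needT = true; Hint(PREFETCH_READ) does not need T.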
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
module TLMonitor_8( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [28:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [28:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_27 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_29 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_46 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_48 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_52 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_54 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_58 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_60 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_64 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_66 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_70 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_72 = 1'h1; // @[Parameters.scala:57:20]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_first_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_first_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_first_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_first_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_set_wo_ready_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_set_wo_ready_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_opcodes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_opcodes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_sizes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_sizes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_opcodes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_opcodes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_sizes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_sizes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_probe_ack_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_probe_ack_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_probe_ack_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_probe_ack_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _same_cycle_resp_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _same_cycle_resp_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _same_cycle_resp_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _same_cycle_resp_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _same_cycle_resp_WIRE_4_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _same_cycle_resp_WIRE_5_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_wo_ready_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_wo_ready_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [1026:0] _c_opcodes_set_T_1 = 1027'h0; // @[Monitor.scala:767:54]
wire [1026:0] _c_sizes_set_T_1 = 1027'h0; // @[Monitor.scala:768:52]
wire [9:0] _c_opcodes_set_T = 10'h0; // @[Monitor.scala:767:79]
wire [9:0] _c_sizes_set_T = 10'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [127:0] _c_set_wo_ready_T = 128'h1; // @[OneHot.scala:58:35]
wire [127:0] _c_set_T = 128'h1; // @[OneHot.scala:58:35]
wire [259:0] c_opcodes_set = 260'h0; // @[Monitor.scala:740:34]
wire [259:0] c_sizes_set = 260'h0; // @[Monitor.scala:741:34]
wire [64:0] c_set = 65'h0; // @[Monitor.scala:738:34]
wire [64:0] c_set_wo_ready = 65'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [6:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_44 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_48 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_54 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_8 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_9 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_T = io_in_a_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_13 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_19 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_2 = _source_ok_T_1 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_8 = _source_ok_T_7 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_14 = _source_ok_T_13 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_20 = _source_ok_T_19 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[2:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] _source_ok_T_25 = io_in_a_bits_source_0[6:3]; // @[Monitor.scala:36:7]
wire _source_ok_T_26 = _source_ok_T_25 == 4'h4; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_28 = _source_ok_T_26; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_30 = _source_ok_T_28; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_5 = _source_ok_T_30; // @[Parameters.scala:1138:31]
wire _source_ok_T_31 = io_in_a_bits_source_0 == 7'h28; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_6 = _source_ok_T_31; // @[Parameters.scala:1138:31]
wire _source_ok_T_32 = io_in_a_bits_source_0 == 7'h29; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_7 = _source_ok_T_32; // @[Parameters.scala:1138:31]
wire _source_ok_T_33 = io_in_a_bits_source_0 == 7'h2A; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_8 = _source_ok_T_33; // @[Parameters.scala:1138:31]
wire _source_ok_T_34 = io_in_a_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_9 = _source_ok_T_34; // @[Parameters.scala:1138:31]
wire _source_ok_T_35 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_36 = _source_ok_T_35 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_37 = _source_ok_T_36 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_38 = _source_ok_T_37 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_39 = _source_ok_T_38 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_40 = _source_ok_T_39 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_41 = _source_ok_T_40 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_42 = _source_ok_T_41 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_42 | _source_ok_WIRE_9; // @[Parameters.scala:1138:31, :1139:46]
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [28:0] _is_aligned_T = {23'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 29'h0; // @[Edges.scala:21:{16,24}]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_4 = _uncommonBits_T_4[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_9 = _uncommonBits_T_9[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_14 = _uncommonBits_T_14[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_19 = _uncommonBits_T_19[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_24 = _uncommonBits_T_24[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_29 = _uncommonBits_T_29[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_34 = _uncommonBits_T_34[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_39 = _uncommonBits_T_39[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_44 = _uncommonBits_T_44[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_46 = _uncommonBits_T_46[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_47 = _uncommonBits_T_47[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_48 = _uncommonBits_T_48[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_49 = _uncommonBits_T_49[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_52 = _uncommonBits_T_52[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_53 = _uncommonBits_T_53[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_54 = _uncommonBits_T_54[2:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_43 = io_in_d_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_43; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_44 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_50 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_56 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_62 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_45 = _source_ok_T_44 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_47 = _source_ok_T_45; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_49 = _source_ok_T_47; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_49; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_51 = _source_ok_T_50 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_53 = _source_ok_T_51; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_55 = _source_ok_T_53; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_55; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_57 = _source_ok_T_56 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_59 = _source_ok_T_57; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_61 = _source_ok_T_59; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_61; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_63 = _source_ok_T_62 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_65 = _source_ok_T_63; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_67 = _source_ok_T_65; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_4 = _source_ok_T_67; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[2:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] _source_ok_T_68 = io_in_d_bits_source_0[6:3]; // @[Monitor.scala:36:7]
wire _source_ok_T_69 = _source_ok_T_68 == 4'h4; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_71 = _source_ok_T_69; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_73 = _source_ok_T_71; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_5 = _source_ok_T_73; // @[Parameters.scala:1138:31]
wire _source_ok_T_74 = io_in_d_bits_source_0 == 7'h28; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_6 = _source_ok_T_74; // @[Parameters.scala:1138:31]
wire _source_ok_T_75 = io_in_d_bits_source_0 == 7'h29; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_7 = _source_ok_T_75; // @[Parameters.scala:1138:31]
wire _source_ok_T_76 = io_in_d_bits_source_0 == 7'h2A; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_8 = _source_ok_T_76; // @[Parameters.scala:1138:31]
wire _source_ok_T_77 = io_in_d_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_9 = _source_ok_T_77; // @[Parameters.scala:1138:31]
wire _source_ok_T_78 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_79 = _source_ok_T_78 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_80 = _source_ok_T_79 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_81 = _source_ok_T_80 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_82 = _source_ok_T_81 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_83 = _source_ok_T_82 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_84 = _source_ok_T_83 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_85 = _source_ok_T_84 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_85 | _source_ok_WIRE_1_9; // @[Parameters.scala:1138:31, :1139:46]
wire _T_1253 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1253; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1253; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [6:0] source; // @[Monitor.scala:390:22]
reg [28:0] address; // @[Monitor.scala:391:22]
wire _T_1326 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1326; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1326; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1326; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [6:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [64:0] inflight; // @[Monitor.scala:614:27]
reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [259:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [64:0] a_set; // @[Monitor.scala:626:34]
wire [64:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [259:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [259:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [9:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [9:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [9:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [9:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [9:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [9:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [9:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [9:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [9:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [259:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [259:0] _a_opcode_lookup_T_6 = {256'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [259:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [259:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [259:0] _a_size_lookup_T_6 = {256'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [259:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[259:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [127:0] _GEN_2 = 128'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [127:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1179 = _T_1253 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_1179 ? _a_set_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_1179 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_1179 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [9:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [9:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [9:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [1026:0] _a_opcodes_set_T_1 = {1023'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_1179 ? _a_opcodes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [1026:0] _a_sizes_set_T_1 = {1023'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_1179 ? _a_sizes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [64:0] d_clr; // @[Monitor.scala:664:34]
wire [64:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [259:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [259:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_1225 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [127:0] _GEN_5 = 128'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1225 & ~d_release_ack ? _d_clr_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1194 = _T_1326 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_1194 ? _d_clr_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_5 = 1039'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_1194 ? _d_opcodes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [1038:0] _d_sizes_clr_T_5 = 1039'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_1194 ? _d_sizes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [64:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [64:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [64:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [259:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [259:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [259:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [259:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [259:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [259:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [64:0] inflight_1; // @[Monitor.scala:726:35]
wire [64:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [259:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [259:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [259:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [259:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [259:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [259:0] _c_opcode_lookup_T_6 = {256'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [259:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [259:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [259:0] _c_size_lookup_T_6 = {256'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [259:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[259:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [64:0] d_clr_1; // @[Monitor.scala:774:34]
wire [64:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [259:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [259:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1297 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1297 & d_release_ack_1 ? _d_clr_wo_ready_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1279 = _T_1326 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1279 ? _d_clr_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_11 = 1039'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1279 ? _d_opcodes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [1038:0] _d_sizes_clr_T_11 = 1039'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1279 ? _d_sizes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 7'h0; // @[Monitor.scala:36:7, :795:113]
wire [64:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [64:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [259:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [259:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [259:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [259:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File IDPool.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
class IDPool(numIds: Int, lateValid: Boolean = false, revocableSelect: Boolean = false) extends Module {
require (numIds > 0)
val idWidth = log2Up(numIds)
val io = IO(new Bundle {
val free = Flipped(Valid(UInt(idWidth.W)))
val alloc = if (revocableSelect) Decoupled(UInt(idWidth.W)) else Irrevocable(UInt(idWidth.W))
})
// True indicates that the id is available
val bitmap = RegInit(UInt(numIds.W), -1.S(numIds.W).asUInt)
val select = RegInit(0.U(idWidth.W))
val valid = RegInit(true.B)
io.alloc.valid := (if (lateValid) bitmap.orR else valid)
io.alloc.bits := (if (revocableSelect) PriorityEncoder(bitmap) else select)
val taken = Mux(io.alloc.ready, UIntToOH(io.alloc.bits, numIds), 0.U)
val allocated = Mux(io.free .valid, UIntToOH(io.free .bits, numIds), 0.U)
val bitmap1 = (bitmap & ~taken) | allocated
val select1 = PriorityEncoder(bitmap1)
val valid1 = ( (bitmap.orR && !((PopCount(bitmap) === 1.U) && io.alloc.ready)) // bitmap not zero, and not allocating last bit
|| io.free.valid)
// Clock gate the bitmap
when (io.alloc.ready || io.free.valid) {
bitmap := bitmap1
valid := valid1
}
// Make select irrevocable
when (io.alloc.ready || (!io.alloc.valid && io.free.valid)) {
select := select1
}
// No double freeing
assert (!io.free.valid || !(bitmap & ~taken)(io.free.bits))
// pre-calculations for timing
if (!lateValid) {
assert (valid === bitmap.orR)
}
if (!revocableSelect) {
when (io.alloc.valid && RegNext(io.alloc.ready || (!io.alloc.valid && io.free.valid))) {
assert (select === PriorityEncoder(bitmap))
}
}
}
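A minimal usage sketch (the wrapper below and its port names are hypothetical, not part of the original sources): an IDPool is driven by taking ids from `io.alloc` and handing the same id back on `io.free` once the owning transaction retires.

import chisel3._
import chisel3.util._
import freechips.rocketchip.util.IDPool

// Hypothetical tag allocator wrapped around IDPool.
class IdTagger(numIds: Int) extends Module {
  private val idW = log2Up(numIds)
  val io = IO(new Bundle {
    val alloc = Decoupled(UInt(idW.W))      // next free id, handshaked to the requester
    val free  = Flipped(Valid(UInt(idW.W))) // id handed back once its transaction retires
  })
  val pool = Module(new IDPool(numIds))
  pool.io.free := io.free                   // recycle retired ids
  io.alloc.valid := pool.io.alloc.valid
  io.alloc.bits  := pool.io.alloc.bits
  pool.io.alloc.ready := io.alloc.ready     // consume an id only when the requester takes it
}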
| module IDPool_3( // @[IDPool.scala:8:7]
input clock, // @[IDPool.scala:8:7]
input reset, // @[IDPool.scala:8:7]
input io_free_valid, // @[IDPool.scala:12:14]
input [2:0] io_free_bits, // @[IDPool.scala:12:14]
input io_alloc_ready, // @[IDPool.scala:12:14]
output io_alloc_valid, // @[IDPool.scala:12:14]
output [2:0] io_alloc_bits // @[IDPool.scala:12:14]
);
wire [2:0] io_alloc_bits_0; // @[IDPool.scala:8:7]
wire io_free_valid_0 = io_free_valid; // @[IDPool.scala:8:7]
wire [2:0] io_free_bits_0 = io_free_bits; // @[IDPool.scala:8:7]
wire io_alloc_ready_0 = io_alloc_ready; // @[IDPool.scala:8:7]
wire [2:0] allocated_shiftAmount = io_free_bits_0; // @[OneHot.scala:64:49]
wire [2:0] taken_shiftAmount = io_alloc_bits_0; // @[OneHot.scala:64:49]
wire io_alloc_valid_0; // @[IDPool.scala:8:7]
reg [7:0] bitmap; // @[IDPool.scala:18:23]
reg [2:0] select; // @[IDPool.scala:19:23]
assign io_alloc_bits_0 = select; // @[IDPool.scala:8:7, :19:23]
reg valid; // @[IDPool.scala:20:23]
assign io_alloc_valid_0 = valid; // @[IDPool.scala:8:7, :20:23]
wire [7:0] _taken_T = 8'h1 << taken_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [7:0] _taken_T_1 = _taken_T; // @[OneHot.scala:65:{12,27}]
wire [7:0] taken = io_alloc_ready_0 ? _taken_T_1 : 8'h0; // @[OneHot.scala:65:27]
wire [7:0] _allocated_T = 8'h1 << allocated_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [7:0] _allocated_T_1 = _allocated_T; // @[OneHot.scala:65:{12,27}]
wire [7:0] allocated = io_free_valid_0 ? _allocated_T_1 : 8'h0; // @[OneHot.scala:65:27]
wire [7:0] _bitmap1_T = ~taken; // @[IDPool.scala:25:19, :27:27]
wire [7:0] _bitmap1_T_1 = bitmap & _bitmap1_T; // @[IDPool.scala:18:23, :27:{25,27}]
wire [7:0] bitmap1 = _bitmap1_T_1 | allocated; // @[IDPool.scala:26:22, :27:{25,35}]
wire _select1_T = bitmap1[0]; // @[OneHot.scala:48:45]
wire _select1_T_1 = bitmap1[1]; // @[OneHot.scala:48:45]
wire _select1_T_2 = bitmap1[2]; // @[OneHot.scala:48:45]
wire _select1_T_3 = bitmap1[3]; // @[OneHot.scala:48:45]
wire _select1_T_4 = bitmap1[4]; // @[OneHot.scala:48:45]
wire _select1_T_5 = bitmap1[5]; // @[OneHot.scala:48:45]
wire _select1_T_6 = bitmap1[6]; // @[OneHot.scala:48:45]
wire _select1_T_7 = bitmap1[7]; // @[OneHot.scala:48:45]
wire [2:0] _select1_T_8 = {2'h3, ~_select1_T_6}; // @[OneHot.scala:48:45]
wire [2:0] _select1_T_9 = _select1_T_5 ? 3'h5 : _select1_T_8; // @[OneHot.scala:48:45]
wire [2:0] _select1_T_10 = _select1_T_4 ? 3'h4 : _select1_T_9; // @[OneHot.scala:48:45]
wire [2:0] _select1_T_11 = _select1_T_3 ? 3'h3 : _select1_T_10; // @[OneHot.scala:48:45]
wire [2:0] _select1_T_12 = _select1_T_2 ? 3'h2 : _select1_T_11; // @[OneHot.scala:48:45]
wire [2:0] _select1_T_13 = _select1_T_1 ? 3'h1 : _select1_T_12; // @[OneHot.scala:48:45]
wire [2:0] select1 = _select1_T ? 3'h0 : _select1_T_13; // @[OneHot.scala:48:45]
wire _valid1_T = |bitmap; // @[IDPool.scala:18:23, :29:28]
wire _valid1_T_1 = bitmap[0]; // @[IDPool.scala:18:23, :29:46]
wire _valid1_T_2 = bitmap[1]; // @[IDPool.scala:18:23, :29:46]
wire _valid1_T_3 = bitmap[2]; // @[IDPool.scala:18:23, :29:46]
wire _valid1_T_4 = bitmap[3]; // @[IDPool.scala:18:23, :29:46]
wire _valid1_T_5 = bitmap[4]; // @[IDPool.scala:18:23, :29:46]
wire _valid1_T_6 = bitmap[5]; // @[IDPool.scala:18:23, :29:46]
wire _valid1_T_7 = bitmap[6]; // @[IDPool.scala:18:23, :29:46]
wire _valid1_T_8 = bitmap[7]; // @[IDPool.scala:18:23, :29:46]
wire [1:0] _valid1_T_9 = {1'h0, _valid1_T_1} + {1'h0, _valid1_T_2}; // @[IDPool.scala:29:46]
wire [1:0] _valid1_T_10 = _valid1_T_9; // @[IDPool.scala:29:46]
wire [1:0] _valid1_T_11 = {1'h0, _valid1_T_3} + {1'h0, _valid1_T_4}; // @[IDPool.scala:29:46]
wire [1:0] _valid1_T_12 = _valid1_T_11; // @[IDPool.scala:29:46]
wire [2:0] _valid1_T_13 = {1'h0, _valid1_T_10} + {1'h0, _valid1_T_12}; // @[IDPool.scala:29:46]
wire [2:0] _valid1_T_14 = _valid1_T_13; // @[IDPool.scala:29:46]
wire [1:0] _valid1_T_15 = {1'h0, _valid1_T_5} + {1'h0, _valid1_T_6}; // @[IDPool.scala:29:46]
wire [1:0] _valid1_T_16 = _valid1_T_15; // @[IDPool.scala:29:46]
wire [1:0] _valid1_T_17 = {1'h0, _valid1_T_7} + {1'h0, _valid1_T_8}; // @[IDPool.scala:29:46]
wire [1:0] _valid1_T_18 = _valid1_T_17; // @[IDPool.scala:29:46]
wire [2:0] _valid1_T_19 = {1'h0, _valid1_T_16} + {1'h0, _valid1_T_18}; // @[IDPool.scala:29:46]
wire [2:0] _valid1_T_20 = _valid1_T_19; // @[IDPool.scala:29:46]
wire [3:0] _valid1_T_21 = {1'h0, _valid1_T_14} + {1'h0, _valid1_T_20}; // @[IDPool.scala:29:46]
wire [3:0] _valid1_T_22 = _valid1_T_21; // @[IDPool.scala:29:46]
wire _valid1_T_23 = _valid1_T_22 == 4'h1; // @[IDPool.scala:29:{46,55}]
wire _valid1_T_24 = _valid1_T_23 & io_alloc_ready_0; // @[IDPool.scala:8:7, :29:{55,64}]
wire _valid1_T_25 = ~_valid1_T_24; // @[IDPool.scala:29:{35,64}]
wire _valid1_T_26 = _valid1_T & _valid1_T_25; // @[IDPool.scala:29:{28,32,35}]
wire valid1 = _valid1_T_26 | io_free_valid_0; // @[IDPool.scala:8:7, :29:32, :30:17]
reg REG; // @[IDPool.scala:51:36] |
Generate the Verilog code corresponding to the following Chisel files.
File BankBinder.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes}
case class BankBinderNode(mask: BigInt)(implicit valName: ValName) extends TLCustomNode
{
private val bit = mask & -mask
val maxXfer = TransferSizes(1, if (bit == 0 || bit > 4096) 4096 else bit.toInt)
val ids = AddressSet.enumerateMask(mask)
def resolveStar(iKnown: Int, oKnown: Int, iStars: Int, oStars: Int): (Int, Int) = {
val ports = ids.size
val oStar = if (oStars == 0) 0 else (ports - oKnown) / oStars
val iStar = if (iStars == 0) 0 else (ports - iKnown) / iStars
require (ports == iKnown + iStar*iStars, s"${name} must have ${ports} inputs, but has ${iKnown} + ${iStar}*${iStars} (at ${lazyModule.line})")
require (ports == oKnown + oStar*oStars, s"${name} must have ${ports} outputs, but has ${oKnown} + ${oStar}*${oStars} (at ${lazyModule.line})")
(iStar, oStar)
}
def mapParamsD(n: Int, p: Seq[TLMasterPortParameters]): Seq[TLMasterPortParameters] =
(p zip ids) map { case (cp, id) => cp.v1copy(clients = cp.clients.map { c => c.v1copy(
visibility = c.visibility.flatMap { a => a.intersect(AddressSet(id, ~mask))},
supportsProbe = c.supports.probe intersect maxXfer,
supportsArithmetic = c.supports.arithmetic intersect maxXfer,
supportsLogical = c.supports.logical intersect maxXfer,
supportsGet = c.supports.get intersect maxXfer,
supportsPutFull = c.supports.putFull intersect maxXfer,
supportsPutPartial = c.supports.putPartial intersect maxXfer,
supportsHint = c.supports.hint intersect maxXfer)})}
def mapParamsU(n: Int, p: Seq[TLSlavePortParameters]): Seq[TLSlavePortParameters] =
(p zip ids) map { case (mp, id) => mp.v1copy(managers = mp.managers.flatMap { m =>
val addresses = m.address.flatMap(a => a.intersect(AddressSet(id, ~mask)))
if (addresses.nonEmpty)
Some(m.v1copy(
address = addresses,
supportsAcquireT = m.supportsAcquireT intersect maxXfer,
supportsAcquireB = m.supportsAcquireB intersect maxXfer,
supportsArithmetic = m.supportsArithmetic intersect maxXfer,
supportsLogical = m.supportsLogical intersect maxXfer,
supportsGet = m.supportsGet intersect maxXfer,
supportsPutFull = m.supportsPutFull intersect maxXfer,
supportsPutPartial = m.supportsPutPartial intersect maxXfer,
supportsHint = m.supportsHint intersect maxXfer))
else None
})}
}
/* A BankBinder is used to divide contiguous memory regions into banks, suitable for a cache */
class BankBinder(mask: BigInt)(implicit p: Parameters) extends LazyModule
{
val node = BankBinderNode(mask)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out <> in
}
}
}
object BankBinder
{
def apply(mask: BigInt)(implicit p: Parameters): TLNode = {
val binder = LazyModule(new BankBinder(mask))
binder.node
}
def apply(nBanks: Int, granularity: Int)(implicit p: Parameters): TLNode = {
if (nBanks > 0) apply(granularity * (nBanks-1))
else TLTempNode()
}
}
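A minimal wiring sketch (the island below, its node names, and the controller handles are illustrative assumptions, not from the original sources). `BankBinder(nBanks = 4, granularity = 0x1000)` builds `BankBinderNode(mask = 0x3000)`; `AddressSet.enumerateMask(0x3000)` yields the four offsets 0x0000, 0x1000, 0x2000 and 0x3000, so the binder exposes four ports, each seeing every fourth 4 KiB block of the address space.

import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink._

// Hypothetical memory island: a crossbar feeds four 4 KiB-interleaved bank controllers
// through a BankBinder.
class BankedIsland(controllers: Seq[TLNode])(implicit p: Parameters) extends LazyModule {
  require(controllers.size == 4)
  val xbar   = TLXbar()                      // crossbar node feeding the banks
  val binder = BankBinder(4, 0x1000)         // mask = 0x1000 * (4 - 1) = 0x3000 -> 4 ports
  controllers.foreach { c => c := binder }   // one BIND_ONCE output edge per bank
  binder :*= xbar                            // the star input resolves to ids.size = 4 edges
  lazy val module = new LazyModuleImp(this) { }
}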
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
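A brief sketch of how these node types are used in practice (the master below and all of its parameter values are assumptions for illustration, not from the original sources): a TLClientNode publishes the master-port parameters that TLImp.mixO threads through the graph, and the negotiated bundle/edge pair is read back inside the module implementation.

import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
import freechips.rocketchip.diplomacy.IdRange
import freechips.rocketchip.tilelink._

// Hypothetical single-source TileLink master built on TLClientNode.
class DummyMaster(implicit p: Parameters) extends LazyModule {
  val node = TLClientNode(Seq(TLMasterPortParameters.v1(Seq(
    TLMasterParameters.v1(name = "dummy-master", sourceId = IdRange(0, 1))))))
  lazy val module = new LazyModuleImp(this) {
    val (tl, edge) = node.out(0)  // negotiated bundle and edge for the single output port
    tl.a.valid := false.B         // this sketch issues no requests
    tl.d.ready := true.B          // but always accepts responses
  }
}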
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
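A minimal sketch of the LazyRawModuleImp pattern described above (the island and its port names are assumptions for illustration): an externally supplied clock and reset are forwarded to childClock/childReset so that lazy children, including any auto-generated monitors, end up in the intended clock domain.

import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyRawModuleImp}

// Hypothetical island that owns its own clock domain and passes it down to its children.
class ExternallyClockedIsland(implicit p: Parameters) extends LazyModule {
  lazy val module = new LazyRawModuleImp(this) {
    override def provideImplicitClockToLazyChildren: Boolean = true
    val clockIn = IO(Input(Clock()))
    val resetIn = IO(Input(Bool()))
    childClock := clockIn   // replace the disabled defaults driven in LazyRawModuleImp
    childReset := resetIn
  }
}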
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
 * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]];
 * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
 *   whether the element is flipped in [[AutoBundle.makeElements]]. If true this corresponds to `danglesIn`, if false it
 *   corresponds to `danglesOut`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
 * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
 *   Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
 *   Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
 *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
 *   Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 *   It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
 *   Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
 *   describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
 *   Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
 *   Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
 *   interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
 *   - line `─`: the source is processed by a function and the result is passed on to others
 *   - Arrow `→`: the target of the arrow is generated by the source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
    * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
    * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
    * edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
    * If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
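A minimal sketch of the in/out accessors documented above (the pass-through adapter is hypothetical, mirroring the BankBinder Impl earlier in this document): each element pairs the negotiated hardware bundle with its edge parameters.

import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink.TLAdapterNode

// Hypothetical pass-through adapter: forwards every channel and checks that the
// negotiated inward and outward edges agree on the data-bus width.
class Passthrough(implicit p: Parameters) extends LazyModule {
  val node = TLAdapterNode()
  lazy val module = new LazyModuleImp(this) {
    (node.in zip node.out).foreach { case ((in, edgeIn), (out, edgeOut)) =>
      require(edgeIn.manager.beatBytes == edgeOut.manager.beatBytes)
      out <> in
    }
  }
}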
| module BankBinder( // @[BankBinder.scala:61:9]
input clock, // @[BankBinder.scala:61:9]
input reset, // @[BankBinder.scala:61:9]
output auto_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_corrupt // @[LazyModuleImp.scala:107:25]
);
wire auto_in_a_valid_0 = auto_in_a_valid; // @[BankBinder.scala:61:9]
wire [2:0] auto_in_a_bits_opcode_0 = auto_in_a_bits_opcode; // @[BankBinder.scala:61:9]
wire [2:0] auto_in_a_bits_param_0 = auto_in_a_bits_param; // @[BankBinder.scala:61:9]
wire [2:0] auto_in_a_bits_size_0 = auto_in_a_bits_size; // @[BankBinder.scala:61:9]
wire [4:0] auto_in_a_bits_source_0 = auto_in_a_bits_source; // @[BankBinder.scala:61:9]
wire [31:0] auto_in_a_bits_address_0 = auto_in_a_bits_address; // @[BankBinder.scala:61:9]
wire [7:0] auto_in_a_bits_mask_0 = auto_in_a_bits_mask; // @[BankBinder.scala:61:9]
wire [63:0] auto_in_a_bits_data_0 = auto_in_a_bits_data; // @[BankBinder.scala:61:9]
wire auto_in_a_bits_corrupt_0 = auto_in_a_bits_corrupt; // @[BankBinder.scala:61:9]
wire auto_in_d_ready_0 = auto_in_d_ready; // @[BankBinder.scala:61:9]
wire auto_out_a_ready_0 = auto_out_a_ready; // @[BankBinder.scala:61:9]
wire auto_out_d_valid_0 = auto_out_d_valid; // @[BankBinder.scala:61:9]
wire [2:0] auto_out_d_bits_opcode_0 = auto_out_d_bits_opcode; // @[BankBinder.scala:61:9]
wire [1:0] auto_out_d_bits_param_0 = auto_out_d_bits_param; // @[BankBinder.scala:61:9]
wire [2:0] auto_out_d_bits_size_0 = auto_out_d_bits_size; // @[BankBinder.scala:61:9]
wire [4:0] auto_out_d_bits_source_0 = auto_out_d_bits_source; // @[BankBinder.scala:61:9]
wire auto_out_d_bits_sink_0 = auto_out_d_bits_sink; // @[BankBinder.scala:61:9]
wire auto_out_d_bits_denied_0 = auto_out_d_bits_denied; // @[BankBinder.scala:61:9]
wire [63:0] auto_out_d_bits_data_0 = auto_out_d_bits_data; // @[BankBinder.scala:61:9]
wire auto_out_d_bits_corrupt_0 = auto_out_d_bits_corrupt; // @[BankBinder.scala:61:9]
wire nodeIn_a_ready; // @[MixedNode.scala:551:17]
wire nodeIn_a_valid = auto_in_a_valid_0; // @[BankBinder.scala:61:9]
wire [2:0] nodeIn_a_bits_opcode = auto_in_a_bits_opcode_0; // @[BankBinder.scala:61:9]
wire [2:0] nodeIn_a_bits_param = auto_in_a_bits_param_0; // @[BankBinder.scala:61:9]
wire [2:0] nodeIn_a_bits_size = auto_in_a_bits_size_0; // @[BankBinder.scala:61:9]
wire [4:0] nodeIn_a_bits_source = auto_in_a_bits_source_0; // @[BankBinder.scala:61:9]
wire [31:0] nodeIn_a_bits_address = auto_in_a_bits_address_0; // @[BankBinder.scala:61:9]
wire [7:0] nodeIn_a_bits_mask = auto_in_a_bits_mask_0; // @[BankBinder.scala:61:9]
wire [63:0] nodeIn_a_bits_data = auto_in_a_bits_data_0; // @[BankBinder.scala:61:9]
wire nodeIn_a_bits_corrupt = auto_in_a_bits_corrupt_0; // @[BankBinder.scala:61:9]
wire nodeIn_d_ready = auto_in_d_ready_0; // @[BankBinder.scala:61:9]
wire nodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] nodeIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [4:0] nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] nodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire nodeOut_a_ready = auto_out_a_ready_0; // @[BankBinder.scala:61:9]
wire nodeOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [4:0] nodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] nodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire nodeOut_d_ready; // @[MixedNode.scala:542:17]
wire nodeOut_d_valid = auto_out_d_valid_0; // @[BankBinder.scala:61:9]
wire [2:0] nodeOut_d_bits_opcode = auto_out_d_bits_opcode_0; // @[BankBinder.scala:61:9]
wire [1:0] nodeOut_d_bits_param = auto_out_d_bits_param_0; // @[BankBinder.scala:61:9]
wire [2:0] nodeOut_d_bits_size = auto_out_d_bits_size_0; // @[BankBinder.scala:61:9]
wire [4:0] nodeOut_d_bits_source = auto_out_d_bits_source_0; // @[BankBinder.scala:61:9]
wire nodeOut_d_bits_sink = auto_out_d_bits_sink_0; // @[BankBinder.scala:61:9]
wire nodeOut_d_bits_denied = auto_out_d_bits_denied_0; // @[BankBinder.scala:61:9]
wire [63:0] nodeOut_d_bits_data = auto_out_d_bits_data_0; // @[BankBinder.scala:61:9]
wire nodeOut_d_bits_corrupt = auto_out_d_bits_corrupt_0; // @[BankBinder.scala:61:9]
wire auto_in_a_ready_0; // @[BankBinder.scala:61:9]
wire [2:0] auto_in_d_bits_opcode_0; // @[BankBinder.scala:61:9]
wire [1:0] auto_in_d_bits_param_0; // @[BankBinder.scala:61:9]
wire [2:0] auto_in_d_bits_size_0; // @[BankBinder.scala:61:9]
wire [4:0] auto_in_d_bits_source_0; // @[BankBinder.scala:61:9]
wire auto_in_d_bits_sink_0; // @[BankBinder.scala:61:9]
wire auto_in_d_bits_denied_0; // @[BankBinder.scala:61:9]
wire [63:0] auto_in_d_bits_data_0; // @[BankBinder.scala:61:9]
wire auto_in_d_bits_corrupt_0; // @[BankBinder.scala:61:9]
wire auto_in_d_valid_0; // @[BankBinder.scala:61:9]
wire [2:0] auto_out_a_bits_opcode_0; // @[BankBinder.scala:61:9]
wire [2:0] auto_out_a_bits_param_0; // @[BankBinder.scala:61:9]
wire [2:0] auto_out_a_bits_size_0; // @[BankBinder.scala:61:9]
wire [4:0] auto_out_a_bits_source_0; // @[BankBinder.scala:61:9]
wire [31:0] auto_out_a_bits_address_0; // @[BankBinder.scala:61:9]
wire [7:0] auto_out_a_bits_mask_0; // @[BankBinder.scala:61:9]
wire [63:0] auto_out_a_bits_data_0; // @[BankBinder.scala:61:9]
wire auto_out_a_bits_corrupt_0; // @[BankBinder.scala:61:9]
wire auto_out_a_valid_0; // @[BankBinder.scala:61:9]
wire auto_out_d_ready_0; // @[BankBinder.scala:61:9]
assign auto_in_a_ready_0 = nodeIn_a_ready; // @[BankBinder.scala:61:9]
assign nodeOut_a_valid = nodeIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_a_bits_opcode = nodeIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_a_bits_param = nodeIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_a_bits_size = nodeIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_a_bits_source = nodeIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_a_bits_address = nodeIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_a_bits_mask = nodeIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_a_bits_data = nodeIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_a_bits_corrupt = nodeIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_d_ready = nodeIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign auto_in_d_valid_0 = nodeIn_d_valid; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_opcode_0 = nodeIn_d_bits_opcode; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_param_0 = nodeIn_d_bits_param; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_size_0 = nodeIn_d_bits_size; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_source_0 = nodeIn_d_bits_source; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_sink_0 = nodeIn_d_bits_sink; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_denied_0 = nodeIn_d_bits_denied; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_data_0 = nodeIn_d_bits_data; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_corrupt_0 = nodeIn_d_bits_corrupt; // @[BankBinder.scala:61:9]
assign nodeIn_a_ready = nodeOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign auto_out_a_valid_0 = nodeOut_a_valid; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_opcode_0 = nodeOut_a_bits_opcode; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_param_0 = nodeOut_a_bits_param; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_size_0 = nodeOut_a_bits_size; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_source_0 = nodeOut_a_bits_source; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_address_0 = nodeOut_a_bits_address; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_mask_0 = nodeOut_a_bits_mask; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_data_0 = nodeOut_a_bits_data; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_corrupt_0 = nodeOut_a_bits_corrupt; // @[BankBinder.scala:61:9]
assign auto_out_d_ready_0 = nodeOut_d_ready; // @[BankBinder.scala:61:9]
assign nodeIn_d_valid = nodeOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_d_bits_opcode = nodeOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_d_bits_param = nodeOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_d_bits_size = nodeOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_d_bits_source = nodeOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_d_bits_sink = nodeOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_d_bits_denied = nodeOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_d_bits_data = nodeOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_d_bits_corrupt = nodeOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
TLMonitor_42 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (nodeIn_a_ready), // @[MixedNode.scala:551:17]
.io_in_a_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_in_a_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_a_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_in_a_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_in_a_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_in_a_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_in_a_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_in_a_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_in_a_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_d_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_in_d_valid (nodeIn_d_valid), // @[MixedNode.scala:551:17]
.io_in_d_bits_opcode (nodeIn_d_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_d_bits_param (nodeIn_d_bits_param), // @[MixedNode.scala:551:17]
.io_in_d_bits_size (nodeIn_d_bits_size), // @[MixedNode.scala:551:17]
.io_in_d_bits_source (nodeIn_d_bits_source), // @[MixedNode.scala:551:17]
.io_in_d_bits_sink (nodeIn_d_bits_sink), // @[MixedNode.scala:551:17]
.io_in_d_bits_denied (nodeIn_d_bits_denied), // @[MixedNode.scala:551:17]
.io_in_d_bits_data (nodeIn_d_bits_data), // @[MixedNode.scala:551:17]
.io_in_d_bits_corrupt (nodeIn_d_bits_corrupt) // @[MixedNode.scala:551:17]
); // @[Nodes.scala:27:25]
assign auto_in_a_ready = auto_in_a_ready_0; // @[BankBinder.scala:61:9]
assign auto_in_d_valid = auto_in_d_valid_0; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_opcode = auto_in_d_bits_opcode_0; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_param = auto_in_d_bits_param_0; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_size = auto_in_d_bits_size_0; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_source = auto_in_d_bits_source_0; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_sink = auto_in_d_bits_sink_0; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_denied = auto_in_d_bits_denied_0; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_data = auto_in_d_bits_data_0; // @[BankBinder.scala:61:9]
assign auto_in_d_bits_corrupt = auto_in_d_bits_corrupt_0; // @[BankBinder.scala:61:9]
assign auto_out_a_valid = auto_out_a_valid_0; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_opcode = auto_out_a_bits_opcode_0; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_param = auto_out_a_bits_param_0; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_size = auto_out_a_bits_size_0; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_source = auto_out_a_bits_source_0; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_address = auto_out_a_bits_address_0; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_mask = auto_out_a_bits_mask_0; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_data = auto_out_a_bits_data_0; // @[BankBinder.scala:61:9]
assign auto_out_a_bits_corrupt = auto_out_a_bits_corrupt_0; // @[BankBinder.scala:61:9]
assign auto_out_d_ready = auto_out_d_ready_0; // @[BankBinder.scala:61:9]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
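// Editorial note: `visible` is true when every client that owns `source` also
// declares `address` inside one of its visibility ranges; clients that do not
// own the source contribute true and so are ignored. Hypothetical sketch (the
// concrete address and source values below are assumptions, not taken from this edge):
// visible("h80000100".U, 2.U, edge) // true if source 2's client may see that bank
// visible("h90000000".U, 2.U, edge) // false if that bank is outside its visibility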
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address=== address,"'B' channel address changed with multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
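// Editorial note: PlusArg turns this limit into a runtime simulation plusarg, so
// the watchdog below can typically be armed with something like
// `+tilelink_timeout=100000` on the simulator command line, and it stays disabled
// at the default value of 0 (the exact flag syntax depends on the simulation
// wrapper, so treat the example invocation as an assumption).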
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
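// Editorial sketch of the packing used below (values illustrative, derived from
// the widths above): with a_opcode_bus_size = 4 and log_a_opcode_bus_size = 2,
// a Get (opcode 4 = 0b100) issued by source 3 is stored as (0b100 << 1) | 1 =
// 0b1001 at bit offset 3 << 2 = 12 of inflight_opcodes; the low bit marks the
// slot as occupied, and a_opcode_lookup later masks that 4-bit slot for
// bundle.d.bits.source and shifts right by 1 to recover the original opcode.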
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
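// Editorial note: both vectors are indexed by the A-channel opcode and list the
// legal D-channel response, e.g. Get (4) and the atomics expect AccessAckData,
// the Puts expect AccessAck, Hint expects HintAck, and AcquireBlock (6) is the
// only index where the two tables differ, since it may be answered with either
// Grant or GrantData; AcquirePerm (7) must receive a plain Grant.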
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
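// Editorial note: the 16kB figure is a rough bound. At sourceBits = 14 there are
// up to 2^14 = 16384 source IDs, and legalizeADSource keeps about 1 inflight bit,
// a 4-bit opcode slot and a size slot of (sizeBits + 1) rounded up to a power of
// two bits per source, i.e. roughly 16384 * (9 to 13) bits, around 18-26 kB of
// register state, which is why tracking is skipped above this threshold.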
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
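// Usage sketch (illustrative, hypothetical helper; not part of the original file):
// gate a handshake on several conditions, excluding the signal currently being driven.
// Note the require above: exclusion is by reference, so pass the same Bool instance.
object DecoupledHelperExample {
  def fireSignals(inValid: Bool, outReady: Bool, idle: Bool): (Bool, Bool) = {
    val helper = DecoupledHelper(inValid, outReady, idle)
    (helper.fire(outReady), helper.fire()) // (drive out.valid, overall fire)
  }
}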
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
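// Usage sketch (illustrative, hypothetical decoder; not part of the original file):
// look up a (writeEnable, result) pair by a 2-bit opcode, with a default for
// unlisted keys, using the MuxT cascade defined above.
object MuxTLookupExample {
  def decode(op: UInt, a: UInt, b: UInt): (Bool, UInt) =
    MuxTLookup(op, (false.B, 0.U), Seq(
      0.U -> (true.B, a + b),
      1.U -> (true.B, a - b)))
}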
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
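// Usage sketch (illustrative, hypothetical helper; not part of the original file):
// byte-enable mask for a 4-byte beat, matching the examples in the comment above,
// e.g. addr_lo=0x3 with lgSize=0 gives 0001 and lgSize=2 gives 1111.
object MaskGenExample {
  def byteMask(addrLo: UInt, lgSize: UInt): UInt = MaskGen(addrLo, lgSize, beatBytes = 4)
}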
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
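// Usage sketch (illustrative, hypothetical plusarg; not part of the original file):
// read +verbose=<n> at simulation time, defaulting to 0 when the plusarg is absent.
// Per the comment above, do not use the returned value as an initial register value.
object PlusArgExample {
  def verbosity(): UInt = PlusArg("verbose", default = 0, docstring = "Simulation verbosity level")
}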
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
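  // Illustrative sketch (hypothetical helper; not part of the original file): combine
  // per-entry valid and mask vectors with the Seq[Bool] bitwise operators above,
  // mirroring the equivalent UInt expression (valids & mask).orR.
  def maskedAnyValidExample(valids: Seq[Bool], mask: Seq[Bool]): Bool = (valids & mask).orR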
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
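  // Worked example (illustrative, hypothetical helper; not part of the original file):
  // advance a queue pointer of non-power-of-2 depth with addWrap, which uses a single
  // conditional subtract instead of a modulo, e.g. with depth 5, ptr 4 wraps to 0.
  def wrapIncrementExample(ptr: UInt, depth: Int): UInt = ptr.addWrap(1.U, depth)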
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
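  // Illustrative sketch (hypothetical helper; not part of the original file): turn a
  // one-hot grant into a thermometer mask (ones at and below the granted bit) using
  // leftOR's low-to-high fill described above.
  def thermometerExample(oneHotGrant: UInt): UInt = leftOR(oneHotGrant)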
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
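  // Usage sketch (illustrative; not part of the original file): unlike Seq.groupBy,
  // the result preserves first-appearance order of keys, so generated hardware is
  // deterministic across runs.
  def groupByParityExample(xs: Seq[Int]): Seq[(Boolean, Seq[Int])] =
    groupByIntoSeq(xs)(_ % 2 == 0)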
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on the inwards path to) the global point of serialization.
* (B)ranch: the agent is on an outwards path from the Trunk and holds at most a read-only copy.
* (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
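// Usage sketch (illustrative, hypothetical helper; not part of the original file):
// classify a 'C' channel param using the encodings above: shrinks give up permissions
// (TtoB, TtoN, BtoN) while reports (TtoT, BtoB, NtoN) only describe current state.
object TLPermissionsExample {
  def givesUpPermissions(cParam: UInt): Bool = TLPermissions.isShrink(cParam)
}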
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
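// Usage sketch (illustrative; not part of the original file): two masters with
// disjoint half-open source-id ranges, both contained in an 8-id window.
object IdRangeExample {
  val masterA = IdRange(0, 4) // ids 0..3
  val masterB = IdRange(4, 8) // ids 4..7
  val legal = !masterA.overlaps(masterB) && IdRange(0, 8).contains(masterA)
}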
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
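// Usage sketch (illustrative; not part of the original file): as the mincover comment
// above warns, covering {1..4} and {64} yields TransferSizes(1, 64), which also
// "contains" 8, 16 and 32 even though neither input supports them.
object TransferSizesExample {
  val small = TransferSizes(1, 4)
  val large = TransferSizes(64, 64)
  val cover = small.mincover(large) // TransferSizes(1, 64)
}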
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
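// Usage sketch (illustrative; not part of the original file): the examples from the
// comment above. AddressSet(0x200, 0xff) covers 0x200-0x2ff, while
// AddressSet(0x1000, 0xf0f) covers 0x1000-0x100f, 0x1100-0x110f, ..., 0x1f00-0x1f0f.
object AddressSetExample {
  val simpleDevice = AddressSet(0x200, 0xff)
  val stripedDevice = AddressSet(0x1000, 0xf0f)
  val disjoint = !simpleDevice.overlaps(stripedDevice) // true: the regions never intersect
}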
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
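  // Illustrative sketch (not part of the original sources; `edge`, `tl`, `srcId`,
  // `blockAddr`, `lgBlockBytes`, `wantBlock`, `gotGrant` and `dReg` are placeholder
  // names, and the permission constant comes from the standard TLPermissions helpers):
  // an acquiring client would typically drive
  //   val (legal, a) = edge.AcquireBlock(srcId, blockAddr, lgBlockBytes, TLPermissions.NtoT)
  //   tl.a.valid := wantBlock && legal
  //   tl.a.bits  := a
  //   tl.e.valid := gotGrant
  //   tl.e.bits  := edge.GrantAck(dReg)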
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
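  // The remaining constructors build the client's channel-C responses to
  // channel-B requests. Illustrative sketch (placeholder names `tl`, `bPending`,
  // `bReg`; not part of the original sources):
  //   tl.c.valid := bPending
  //   tl.c.bits  := edge.AccessAck(bReg)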
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
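  // TLEdgeIn is the manager-side counterpart of TLEdgeOut: it constructs the
  // messages a manager may drive (channel B requests and channel D responses).
  // The private helper below is a transpose over possibly ragged sequences;
  // rows that run out of elements are simply dropped.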
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
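  // Illustrative sketch (placeholder names `in`, `edge`, `mySinkId`, `aReg`,
  // `canGrant`, `grantData`; not part of the original sources): a manager
  // answering an Acquire with data would typically drive
  //   in.d.valid := canGrant
  //   in.d.bits  := edge.Grant(mySinkId, aReg.source, aReg.size, TLPermissions.toT, grantData)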
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
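  // Illustrative sketch (placeholder names `in`, `edge`, `isGet`, `readData`;
  // not part of the original sources): a simple register-style manager can
  // answer a Get combinationally on channel D:
  //   in.d.valid := in.a.valid && isGet
  //   in.d.bits  := edge.AccessAck(in.a.bits, readData)
  //   in.a.ready := in.d.ready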
}
| module TLMonitor_64( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [20:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_d_bits_source // @[Monitor.scala:20:14]
);
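  // TLMonitor_64 is the elaborated protocol checker for one TileLink edge that
  // carries only the A and D channels: it watches the handshakes above and
  // asserts the TileLink legality, alignment and response-matching rules. The
  // `@[...]` locators point back to the Chisel sources that generated each net.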
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [20:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
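  // This edge carries no C channel, so all of the C-side bookkeeping below
  // reduces to constants; the `_c_*` / `c_*` nets survive only as named
  // constant wires.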
wire [63:0] io_in_d_bits_data = 64'h0; // @[Monitor.scala:36:7]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_27 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_44 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_46 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_50 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_52 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_56 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_58 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_62 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_64 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_68 = 1'h1; // @[Parameters.scala:56:32]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7]
wire [20:0] _c_first_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_first_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_first_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_wo_ready_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_wo_ready_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_opcodes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_opcodes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_sizes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_sizes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _c_probe_ack_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _c_probe_ack_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [20:0] _same_cycle_resp_WIRE_4_bits_address = 21'h0; // @[Bundles.scala:265:74]
wire [20:0] _same_cycle_resp_WIRE_5_bits_address = 21'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_wo_ready_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_wo_ready_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [1026:0] _c_opcodes_set_T_1 = 1027'h0; // @[Monitor.scala:767:54]
wire [1026:0] _c_sizes_set_T_1 = 1027'h0; // @[Monitor.scala:768:52]
wire [9:0] _c_opcodes_set_T = 10'h0; // @[Monitor.scala:767:79]
wire [9:0] _c_sizes_set_T = 10'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [127:0] _c_set_wo_ready_T = 128'h1; // @[OneHot.scala:58:35]
wire [127:0] _c_set_T = 128'h1; // @[OneHot.scala:58:35]
wire [259:0] c_opcodes_set = 260'h0; // @[Monitor.scala:740:34]
wire [259:0] c_sizes_set = 260'h0; // @[Monitor.scala:741:34]
wire [64:0] c_set = 65'h0; // @[Monitor.scala:738:34]
wire [64:0] c_set_wo_ready = 65'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [6:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_44 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_48 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_54 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_8 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_9 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_T = io_in_a_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_13 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_19 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_2 = _source_ok_T_1 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_8 = _source_ok_T_7 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_14 = _source_ok_T_13 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_20 = _source_ok_T_19 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[2:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] _source_ok_T_25 = io_in_a_bits_source_0[6:3]; // @[Monitor.scala:36:7]
wire _source_ok_T_26 = _source_ok_T_25 == 4'h4; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_28 = _source_ok_T_26; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_29 = source_ok_uncommonBits_4 < 3'h5; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_30 = _source_ok_T_28 & _source_ok_T_29; // @[Parameters.scala:54:67, :56:48, :57:20]
wire _source_ok_WIRE_5 = _source_ok_T_30; // @[Parameters.scala:1138:31]
wire _source_ok_T_31 = io_in_a_bits_source_0 == 7'h25; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_6 = _source_ok_T_31; // @[Parameters.scala:1138:31]
wire _source_ok_T_32 = io_in_a_bits_source_0 == 7'h28; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_7 = _source_ok_T_32; // @[Parameters.scala:1138:31]
wire _source_ok_T_33 = io_in_a_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_8 = _source_ok_T_33; // @[Parameters.scala:1138:31]
wire _source_ok_T_34 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_35 = _source_ok_T_34 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_36 = _source_ok_T_35 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_37 = _source_ok_T_36 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_38 = _source_ok_T_37 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_39 = _source_ok_T_38 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_40 = _source_ok_T_39 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_40 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46]
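  // `source_ok` above checks that the A-channel source id falls inside one of
  // the client id ranges visible on this edge; the alignment check and byte
  // mask expansion for the request follow.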
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [20:0] _is_aligned_T = {15'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 21'h0; // @[Edges.scala:21:{16,24}]
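  // The tree below (from Misc.scala) expands (address, size) into the reference
  // byte-lane mask that is used to check the request's a.mask field.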
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_4 = _uncommonBits_T_4[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_9 = _uncommonBits_T_9[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_14 = _uncommonBits_T_14[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_19 = _uncommonBits_T_19[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_24 = _uncommonBits_T_24[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_29 = _uncommonBits_T_29[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_34 = _uncommonBits_T_34[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_39 = _uncommonBits_T_39[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_44 = _uncommonBits_T_44[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_46 = _uncommonBits_T_46[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_47 = _uncommonBits_T_47[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_48 = _uncommonBits_T_48[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_49 = _uncommonBits_T_49[2:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_52 = _uncommonBits_T_52[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_53 = _uncommonBits_T_53[1:0]; // @[Parameters.scala:52:{29,56}]
wire [2:0] uncommonBits_54 = _uncommonBits_T_54[2:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_41 = io_in_d_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_41; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_42 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_48 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_54 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_60 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_43 = _source_ok_T_42 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_45 = _source_ok_T_43; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_47 = _source_ok_T_45; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_47; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_49 = _source_ok_T_48 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_51 = _source_ok_T_49; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_53 = _source_ok_T_51; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_53; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_55 = _source_ok_T_54 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_57 = _source_ok_T_55; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_59 = _source_ok_T_57; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_59; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_61 = _source_ok_T_60 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_63 = _source_ok_T_61; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_65 = _source_ok_T_63; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_4 = _source_ok_T_65; // @[Parameters.scala:1138:31]
wire [2:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[2:0]; // @[Parameters.scala:52:{29,56}]
wire [3:0] _source_ok_T_66 = io_in_d_bits_source_0[6:3]; // @[Monitor.scala:36:7]
wire _source_ok_T_67 = _source_ok_T_66 == 4'h4; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_69 = _source_ok_T_67; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_70 = source_ok_uncommonBits_9 < 3'h5; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_71 = _source_ok_T_69 & _source_ok_T_70; // @[Parameters.scala:54:67, :56:48, :57:20]
wire _source_ok_WIRE_1_5 = _source_ok_T_71; // @[Parameters.scala:1138:31]
wire _source_ok_T_72 = io_in_d_bits_source_0 == 7'h25; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_6 = _source_ok_T_72; // @[Parameters.scala:1138:31]
wire _source_ok_T_73 = io_in_d_bits_source_0 == 7'h28; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_7 = _source_ok_T_73; // @[Parameters.scala:1138:31]
wire _source_ok_T_74 = io_in_d_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_8 = _source_ok_T_74; // @[Parameters.scala:1138:31]
wire _source_ok_T_75 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_76 = _source_ok_T_75 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_77 = _source_ok_T_76 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_78 = _source_ok_T_77 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_79 = _source_ok_T_78 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_80 = _source_ok_T_79 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_81 = _source_ok_T_80 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_81 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46]
wire _T_1149 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1149; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1149; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [6:0] source; // @[Monitor.scala:390:22]
reg [20:0] address; // @[Monitor.scala:391:22]
wire _T_1217 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1217; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1217; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1217; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [6:0] source_1; // @[Monitor.scala:541:22]
reg [64:0] inflight; // @[Monitor.scala:614:27]
reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [259:0] inflight_sizes; // @[Monitor.scala:618:33]
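  // Per-source bookkeeping for the A=>D handshake checks: inflight holds one bit per
  // source ID, set on the first beat of an A-channel request and cleared by the matching
  // D-channel response, while inflight_opcodes / inflight_sizes record 4 bits of opcode /
  // size per source so each response can be cross-checked against its request.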
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [64:0] a_set; // @[Monitor.scala:626:34]
wire [64:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [259:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [259:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [9:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [9:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [9:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [9:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [9:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [9:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [9:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [9:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [9:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [259:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [259:0] _a_opcode_lookup_T_6 = {256'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [259:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [259:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [259:0] _a_size_lookup_T_6 = {256'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [259:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[259:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [127:0] _GEN_2 = 128'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [127:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1082 = _T_1149 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_1082 ? _a_set_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_1082 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_1082 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [9:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [9:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [9:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [1026:0] _a_opcodes_set_T_1 = {1023'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_1082 ? _a_opcodes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [1026:0] _a_sizes_set_T_1 = {1023'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_1082 ? _a_sizes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [64:0] d_clr; // @[Monitor.scala:664:34]
wire [64:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [259:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [259:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_1128 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [127:0] _GEN_5 = 128'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1128 & ~d_release_ack ? _d_clr_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1097 = _T_1217 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_1097 ? _d_clr_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_5 = 1039'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_1097 ? _d_opcodes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [1038:0] _d_sizes_clr_T_5 = 1039'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_1097 ? _d_sizes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [64:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [64:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [64:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [259:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [259:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [259:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [259:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [259:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [259:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [64:0] inflight_1; // @[Monitor.scala:726:35]
wire [64:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [259:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [259:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [259:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [259:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [259:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [259:0] _c_opcode_lookup_T_6 = {256'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [259:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [259:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [259:0] _c_size_lookup_T_6 = {256'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [259:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[259:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [64:0] d_clr_1; // @[Monitor.scala:774:34]
wire [64:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [259:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [259:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1193 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1193 & d_release_ack_1 ? _d_clr_wo_ready_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1175 = _T_1217 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1175 ? _d_clr_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_11 = 1039'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1175 ? _d_opcodes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [1038:0] _d_sizes_clr_T_11 = 1039'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1175 ? _d_sizes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 7'h0; // @[Monitor.scala:36:7, :795:113]
wire [64:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [64:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [259:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [259:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [259:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [259:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftRegisterPriorityQueue.scala:
package compressacc
import chisel3._
import chisel3.util._
// TODO : support enq & deq at the same cycle
class PriorityQueueStageIO(keyWidth: Int, value: ValueInfo) extends Bundle {
val output_prev = KeyValue(keyWidth, value)
val output_nxt = KeyValue(keyWidth, value)
val input_prev = Flipped(KeyValue(keyWidth, value))
val input_nxt = Flipped(KeyValue(keyWidth, value))
val cmd = Flipped(Valid(UInt(1.W)))
val insert_here = Input(Bool())
val cur_input_keyval = Flipped(KeyValue(keyWidth, value))
val cur_output_keyval = KeyValue(keyWidth, value)
}
class PriorityQueueStage(keyWidth: Int, value: ValueInfo) extends Module {
val io = IO(new PriorityQueueStageIO(keyWidth, value))
dontTouch(io)
val CMD_DEQ = 0.U
val CMD_ENQ = 1.U
val MAX_VALUE = (1 << keyWidth) - 1
val key_reg = RegInit(MAX_VALUE.U(keyWidth.W))
val value_reg = Reg(value)
io.output_prev.key := key_reg
io.output_prev.value := value_reg
io.output_nxt.key := key_reg
io.output_nxt.value := value_reg
io.cur_output_keyval.key := key_reg
io.cur_output_keyval.value := value_reg
when (io.cmd.valid) {
switch (io.cmd.bits) {
is (CMD_DEQ) {
key_reg := io.input_nxt.key
value_reg := io.input_nxt.value
}
is (CMD_ENQ) {
when (io.insert_here) {
key_reg := io.cur_input_keyval.key
value_reg := io.cur_input_keyval.value
} .elsewhen (key_reg >= io.cur_input_keyval.key) {
key_reg := io.input_prev.key
value_reg := io.input_prev.value
} .otherwise {
// do nothing
}
}
}
}
}
object PriorityQueueStage {
def apply(keyWidth: Int, v: ValueInfo): PriorityQueueStage = new PriorityQueueStage(keyWidth, v)
}
// TODO
// - This design is not scalable as the enqueued keyval is broadcast to all the stages
// - Add pipeline registers later
class PriorityQueueIO(queSize: Int, keyWidth: Int, value: ValueInfo) extends Bundle {
val cnt_bits = log2Ceil(queSize+1)
val counter = Output(UInt(cnt_bits.W))
val enq = Flipped(Decoupled(KeyValue(keyWidth, value)))
val deq = Decoupled(KeyValue(keyWidth, value))
}
class PriorityQueue(queSize: Int, keyWidth: Int, value: ValueInfo) extends Module {
val keyWidthInternal = keyWidth + 1
val CMD_DEQ = 0.U
val CMD_ENQ = 1.U
val io = IO(new PriorityQueueIO(queSize, keyWidthInternal, value))
dontTouch(io)
val MAX_VALUE = ((1 << keyWidthInternal) - 1).U
val cnt_bits = log2Ceil(queSize+1)
// do not consider cases where we insert more entries than the queSize
val counter = RegInit(0.U(cnt_bits.W))
io.counter := counter
val full = (counter === queSize.U)
val empty = (counter === 0.U)
io.deq.valid := !empty
io.enq.ready := !full
when (io.enq.fire) {
counter := counter + 1.U
}
when (io.deq.fire) {
counter := counter - 1.U
}
val cmd_valid = io.enq.valid || io.deq.ready
val cmd = Mux(io.enq.valid, CMD_ENQ, CMD_DEQ)
assert(!(io.enq.valid && io.deq.ready))
val stages = Seq.fill(queSize)(Module(new PriorityQueueStage(keyWidthInternal, value)))
for (i <- 0 until (queSize - 1)) {
stages(i+1).io.input_prev <> stages(i).io.output_nxt
stages(i).io.input_nxt <> stages(i+1).io.output_prev
}
stages(queSize-1).io.input_nxt.key := MAX_VALUE
// stages(queSize-1).io.input_nxt.value :=
stages(queSize-1).io.input_nxt.value.symbol := 0.U
// stages(queSize-1).io.input_nxt.value.child(0) := 0.U
// stages(queSize-1).io.input_nxt.value.child(1) := 0.U
stages(0).io.input_prev.key := io.enq.bits.key
stages(0).io.input_prev.value <> io.enq.bits.value
for (i <- 0 until queSize) {
stages(i).io.cmd.valid := cmd_valid
stages(i).io.cmd.bits := cmd
stages(i).io.cur_input_keyval <> io.enq.bits
}
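// Enqueue keeps the stages sorted in ascending key order from stage 0: every stage whose
// current key is >= the incoming key shifts one slot toward the tail, and the lowest-indexed
// such stage (insert_here_idx) captures the new entry. For example, with stage keys
// (3, 8, MAX, MAX) and an incoming key of 5, is_large_or_equal is (false, true, true, true),
// so insert_here_idx = 1 and the keys become (3, 5, 8, MAX).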
val is_large_or_equal = WireInit(VecInit(Seq.fill(queSize)(false.B)))
for (i <- 0 until queSize) {
is_large_or_equal(i) := (stages(i).io.cur_output_keyval.key >= io.enq.bits.key)
}
val is_large_or_equal_cat = Wire(UInt(queSize.W))
is_large_or_equal_cat := Cat(is_large_or_equal.reverse)
val insert_here_idx = PriorityEncoder(is_large_or_equal_cat)
for (i <- 0 until queSize) {
when (i.U === insert_here_idx) {
stages(i).io.insert_here := true.B
} .otherwise {
stages(i).io.insert_here := false.B
}
}
io.deq.bits <> stages(0).io.output_prev
}
| module PriorityQueueStage_5( // @[ShiftRegisterPriorityQueue.scala:21:7]
input clock, // @[ShiftRegisterPriorityQueue.scala:21:7]
input reset, // @[ShiftRegisterPriorityQueue.scala:21:7]
output [30:0] io_output_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_output_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [30:0] io_output_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_output_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_input_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_input_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_input_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_input_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_cmd_valid, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_cmd_bits, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_insert_here, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_cur_input_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_cur_input_keyval_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [30:0] io_cur_output_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_cur_output_keyval_value_symbol // @[ShiftRegisterPriorityQueue.scala:22:14]
);
wire [30:0] io_input_prev_key_0 = io_input_prev_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_input_prev_value_symbol_0 = io_input_prev_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_input_nxt_key_0 = io_input_nxt_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_input_nxt_value_symbol_0 = io_input_nxt_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_cmd_valid_0 = io_cmd_valid; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_cmd_bits_0 = io_cmd_bits; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_insert_here_0 = io_insert_here; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_cur_input_keyval_key_0 = io_cur_input_keyval_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_cur_input_keyval_value_symbol_0 = io_cur_input_keyval_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
reg [30:0] key_reg; // @[ShiftRegisterPriorityQueue.scala:30:24]
assign io_output_prev_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
assign io_output_nxt_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
assign io_cur_output_keyval_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
reg [9:0] value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:31:22]
assign io_output_prev_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
assign io_output_nxt_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
assign io_cur_output_keyval_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
wire _T_2 = key_reg >= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24, :52:30]
always @(posedge clock) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (reset) // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= 31'h7FFFFFFF; // @[ShiftRegisterPriorityQueue.scala:30:24]
else if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
key_reg <= io_input_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
end
else // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= io_input_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
end
if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
value_reg_symbol <= io_cur_input_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
value_reg_symbol <= io_input_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
end
else // @[ShiftRegisterPriorityQueue.scala:21:7]
value_reg_symbol <= io_input_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
end
end // always @(posedge)
assign io_output_prev_key = io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_prev_value_symbol = io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_nxt_key = io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_nxt_value_symbol = io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_cur_output_keyval_key = io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_cur_output_keyval_value_symbol = io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
endmodule |
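As a side note, Verilog in the style shown above can be reproduced from a parameterized Chisel module with a small elaboration driver. The sketch below is hypothetical and self-contained: it does not reuse compressacc's ValueInfo (whose definition is not part of the sources quoted here) and assumes a recent Chisel/CIRCT toolchain; it only illustrates the Chisel-to-SystemVerilog flow that pairs each module in this document with its emitted RTL.

// Hypothetical sketch, not part of the original sources.
import chisel3._
import circt.stage.ChiselStage

class ShiftStage(width: Int) extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(width.W))
    val out = Output(UInt(width.W))
  })
  // One pipeline register, reset to zero (compare key_reg's reset-to-MAX pattern above).
  io.out := RegNext(io.in, 0.U(width.W))
}

object EmitShiftStage extends App {
  // Prints SystemVerilog for a 31-bit instance (31 matches the key width of the module above).
  println(ChiselStage.emitSystemVerilog(new ShiftStage(31)))
}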
Generate the Verilog code corresponding to the following Chisel files.
File InputUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{FlowRoutingBundle}
import constellation.noc.{HasNoCParams}
class AbstractInputUnitIO(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams],
)(implicit val p: Parameters) extends Bundle with HasRouterOutputParams {
val nodeId = cParam.destId
val router_req = Decoupled(new RouteComputerReq)
val router_resp = Input(new RouteComputerResp(outParams, egressParams))
val vcalloc_req = Decoupled(new VCAllocReq(cParam, outParams, egressParams))
val vcalloc_resp = Input(new VCAllocResp(outParams, egressParams))
val out_credit_available = Input(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) }))
val salloc_req = Vec(cParam.destSpeedup, Decoupled(new SwitchAllocReq(outParams, egressParams)))
val out = Vec(cParam.destSpeedup, Valid(new SwitchBundle(outParams, egressParams)))
val debug = Output(new Bundle {
val va_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
val sa_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
})
val block = Input(Bool())
}
abstract class AbstractInputUnit(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module with HasRouterOutputParams with HasNoCParams {
val nodeId = cParam.destId
def io: AbstractInputUnitIO
}
class InputBuffer(cParam: ChannelParams)(implicit p: Parameters) extends Module {
val nVirtualChannels = cParam.nVirtualChannels
val io = IO(new Bundle {
val enq = Flipped(Vec(cParam.srcSpeedup, Valid(new Flit(cParam.payloadBits))))
val deq = Vec(cParam.nVirtualChannels, Decoupled(new BaseFlit(cParam.payloadBits)))
})
val useOutputQueues = cParam.useOutputQueues
val delims = if (useOutputQueues) {
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize else 0).scanLeft(0)(_+_)
} else {
// If no queuing, have to add an additional slot since head == tail implies empty
// TODO this should be fixed, should use all slots available
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize + 1 else 0).scanLeft(0)(_+_)
}
val starts = delims.dropRight(1).zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val ends = delims.tail.zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val fullSize = delims.last
// Ugly case. Use multiple queues
if ((cParam.srcSpeedup > 1 || cParam.destSpeedup > 1 || fullSize <= 1) || !cParam.unifiedBuffer) {
require(useOutputQueues)
val qs = cParam.virtualChannelParams.map(v => Module(new Queue(new BaseFlit(cParam.payloadBits), v.bufferSize)))
qs.zipWithIndex.foreach { case (q,i) =>
val sel = io.enq.map(f => f.valid && f.bits.virt_channel_id === i.U)
q.io.enq.valid := sel.orR
q.io.enq.bits.head := Mux1H(sel, io.enq.map(_.bits.head))
q.io.enq.bits.tail := Mux1H(sel, io.enq.map(_.bits.tail))
q.io.enq.bits.payload := Mux1H(sel, io.enq.map(_.bits.payload))
io.deq(i) <> q.io.deq
}
} else {
val mem = Mem(fullSize, new BaseFlit(cParam.payloadBits))
val heads = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val tails = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val empty = (heads zip tails).map(t => t._1 === t._2)
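// Unified buffer: each traversable VC owns the slice [starts(i), ends(i)) of `mem` and keeps
// its own head/tail pointer that wraps within that slice, so head == tail means that VC's
// region is empty (hence the extra slot reserved per VC when output queues are disabled).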
val qs = Seq.fill(nVirtualChannels) { Module(new Queue(new BaseFlit(cParam.payloadBits), 1, pipe=true)) }
qs.foreach(_.io.enq.valid := false.B)
qs.foreach(_.io.enq.bits := DontCare)
val vc_sel = UIntToOH(io.enq(0).bits.virt_channel_id)
val flit = Wire(new BaseFlit(cParam.payloadBits))
val direct_to_q = (Mux1H(vc_sel, qs.map(_.io.enq.ready)) && Mux1H(vc_sel, empty)) && useOutputQueues.B
flit.head := io.enq(0).bits.head
flit.tail := io.enq(0).bits.tail
flit.payload := io.enq(0).bits.payload
when (io.enq(0).valid && !direct_to_q) {
val tail = tails(io.enq(0).bits.virt_channel_id)
mem.write(tail, flit)
tails(io.enq(0).bits.virt_channel_id) := Mux(
tail === Mux1H(vc_sel, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(vc_sel, starts.map(_.U)),
tail + 1.U)
} .elsewhen (io.enq(0).valid && direct_to_q) {
for (i <- 0 until nVirtualChannels) {
when (io.enq(0).bits.virt_channel_id === i.U) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := flit
}
}
}
if (useOutputQueues) {
val can_to_q = (0 until nVirtualChannels).map { i => !empty(i) && qs(i).io.enq.ready }
val to_q_oh = PriorityEncoderOH(can_to_q)
val to_q = OHToUInt(to_q_oh)
when (can_to_q.orR) {
val head = Mux1H(to_q_oh, heads)
heads(to_q) := Mux(
head === Mux1H(to_q_oh, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(to_q_oh, starts.map(_.U)),
head + 1.U)
for (i <- 0 until nVirtualChannels) {
when (to_q_oh(i)) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := mem.read(head)
}
}
}
for (i <- 0 until nVirtualChannels) {
io.deq(i) <> qs(i).io.deq
}
} else {
qs.map(_.io.deq.ready := false.B)
val ready_sel = io.deq.map(_.ready)
val fire = io.deq.map(_.fire)
assert(PopCount(fire) <= 1.U)
val head = Mux1H(fire, heads)
when (fire.orR) {
val fire_idx = OHToUInt(fire)
heads(fire_idx) := Mux(
head === Mux1H(fire, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(fire, starts.map(_.U)),
head + 1.U)
}
val read_flit = mem.read(head)
for (i <- 0 until nVirtualChannels) {
io.deq(i).valid := !empty(i)
io.deq(i).bits := read_flit
}
}
}
}
class InputUnit(cParam: ChannelParams, outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean, combineSAST: Boolean
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
val nVirtualChannels = cParam.nVirtualChannels
val virtualChannelParams = cParam.virtualChannelParams
class InputUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(new Channel(cParam.asInstanceOf[ChannelParams]))
}
val io = IO(new InputUnitIO)
val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5)
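// Per-VC packet state machine: g_i = idle, g_r = waiting for route computation, g_v = waiting
// for VC allocation, g_a = active (flits compete for switch allocation until the tail departs);
// g_c is declared but not used in this unit.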
class InputState extends Bundle {
val g = UInt(3.W)
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
val flow = new FlowRoutingBundle
val fifo_deps = UInt(nVirtualChannels.W)
}
val input_buffer = Module(new InputBuffer(cParam))
for (i <- 0 until cParam.srcSpeedup) {
input_buffer.io.enq(i) := io.in.flit(i)
}
input_buffer.io.deq.foreach(_.ready := false.B)
val route_arbiter = Module(new Arbiter(
new RouteComputerReq, nVirtualChannels
))
io.router_req <> route_arbiter.io.out
val states = Reg(Vec(nVirtualChannels, new InputState))
val anyFifo = cParam.possibleFlows.map(_.fifo).reduce(_||_)
val allFifo = cParam.possibleFlows.map(_.fifo).reduce(_&&_)
if (anyFifo) {
val idle_mask = VecInit(states.map(_.g === g_i)).asUInt
for (s <- states)
for (i <- 0 until nVirtualChannels)
s.fifo_deps := s.fifo_deps & ~idle_mask
}
for (i <- 0 until cParam.srcSpeedup) {
when (io.in.flit(i).fire && io.in.flit(i).bits.head) {
val id = io.in.flit(i).bits.virt_channel_id
assert(id < nVirtualChannels.U)
assert(states(id).g === g_i)
val at_dest = io.in.flit(i).bits.flow.egress_node === nodeId.U
states(id).g := Mux(at_dest, g_v, g_r)
states(id).vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (o.U === io.in.flit(i).bits.flow.egress_node_id) {
states(id).vc_sel(o+nOutputs)(0) := true.B
}
}
states(id).flow := io.in.flit(i).bits.flow
if (anyFifo) {
val fifo = cParam.possibleFlows.filter(_.fifo).map(_.isFlow(io.in.flit(i).bits.flow)).toSeq.orR
states(id).fifo_deps := VecInit(states.zipWithIndex.map { case (s, j) =>
s.g =/= g_i && s.flow.asUInt === io.in.flit(i).bits.flow.asUInt && j.U =/= id
}).asUInt
}
}
}
(route_arbiter.io.in zip states).zipWithIndex.map { case ((i,s),idx) =>
if (virtualChannelParams(idx).traversable) {
i.valid := s.g === g_r
i.bits.flow := s.flow
i.bits.src_virt_id := idx.U
when (i.fire) { s.g := g_v }
} else {
i.valid := false.B
i.bits := DontCare
}
}
when (io.router_req.fire) {
val id = io.router_req.bits.src_virt_id
assert(states(id).g === g_r)
states(id).g := g_v
for (i <- 0 until nVirtualChannels) {
when (i.U === id) {
states(i).vc_sel := io.router_resp.vc_sel
}
}
}
val mask = RegInit(0.U(nVirtualChannels.W))
val vcalloc_reqs = Wire(Vec(nVirtualChannels, new VCAllocReq(cParam, outParams, egressParams)))
val vcalloc_vals = Wire(Vec(nVirtualChannels, Bool()))
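// Rotating-priority arbitration over the VCs requesting VC allocation: the request vector is
// duplicated, the low copy is masked to hide VCs at or below the last grant, and a single
// priority encoder over the doubled vector picks the next requester, wrapping to the unmasked
// copy when no higher-numbered VC is requesting.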
val vcalloc_filter = PriorityEncoderOH(Cat(vcalloc_vals.asUInt, vcalloc_vals.asUInt & ~mask))
val vcalloc_sel = vcalloc_filter(nVirtualChannels-1,0) | (vcalloc_filter >> nVirtualChannels)
// Prioritize incoming packets
when (io.router_req.fire) {
mask := (1.U << io.router_req.bits.src_virt_id) - 1.U
} .elsewhen (vcalloc_vals.orR) {
mask := Mux1H(vcalloc_sel, (0 until nVirtualChannels).map { w => ~(0.U((w+1).W)) })
}
io.vcalloc_req.valid := vcalloc_vals.orR
io.vcalloc_req.bits := Mux1H(vcalloc_sel, vcalloc_reqs)
states.zipWithIndex.map { case (s,idx) =>
if (virtualChannelParams(idx).traversable) {
vcalloc_vals(idx) := s.g === g_v && s.fifo_deps === 0.U
vcalloc_reqs(idx).in_vc := idx.U
vcalloc_reqs(idx).vc_sel := s.vc_sel
vcalloc_reqs(idx).flow := s.flow
when (vcalloc_vals(idx) && vcalloc_sel(idx) && io.vcalloc_req.ready) { s.g := g_a }
if (combineRCVA) {
when (route_arbiter.io.in(idx).fire) {
vcalloc_vals(idx) := true.B
vcalloc_reqs(idx).vc_sel := io.router_resp.vc_sel
}
}
} else {
vcalloc_vals(idx) := false.B
vcalloc_reqs(idx) := DontCare
}
}
io.debug.va_stall := PopCount(vcalloc_vals) - io.vcalloc_req.ready
when (io.vcalloc_req.fire) {
for (i <- 0 until nVirtualChannels) {
when (vcalloc_sel(i)) {
states(i).vc_sel := io.vcalloc_resp.vc_sel
states(i).g := g_a
if (!combineRCVA) {
assert(states(i).g === g_v)
}
}
}
}
val salloc_arb = Module(new SwitchArbiter(
nVirtualChannels,
cParam.destSpeedup,
outParams, egressParams
))
(states zip salloc_arb.io.in).zipWithIndex.map { case ((s,r),i) =>
if (virtualChannelParams(i).traversable) {
val credit_available = (s.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
r.valid := s.g === g_a && credit_available && input_buffer.io.deq(i).valid
r.bits.vc_sel := s.vc_sel
val deq_tail = input_buffer.io.deq(i).bits.tail
r.bits.tail := deq_tail
when (r.fire && deq_tail) {
s.g := g_i
}
input_buffer.io.deq(i).ready := r.ready
} else {
r.valid := false.B
r.bits := DontCare
}
}
io.debug.sa_stall := PopCount(salloc_arb.io.in.map(r => r.valid && !r.ready))
io.salloc_req <> salloc_arb.io.out
when (io.block) {
salloc_arb.io.out.foreach(_.ready := false.B)
io.salloc_req.foreach(_.valid := false.B)
}
class OutBundle extends Bundle {
val valid = Bool()
val vid = UInt(virtualChannelBits.W)
val out_vid = UInt(log2Up(allOutParams.map(_.nVirtualChannels).max).W)
val flit = new Flit(cParam.payloadBits)
}
val salloc_outs = if (combineSAST) {
Wire(Vec(cParam.destSpeedup, new OutBundle))
} else {
Reg(Vec(cParam.destSpeedup, new OutBundle))
}
io.in.credit_return := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire, salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
io.in.vc_free := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire && Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)),
salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
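// Each switch-allocation grant frees one buffer slot in the winning input VC, so a credit for
// that VC is returned to the upstream channel; vc_free additionally pulses when the departing
// flit is a tail, telling the sender the whole VC is available again.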
for (i <- 0 until cParam.destSpeedup) {
val salloc_out = salloc_outs(i)
salloc_out.valid := salloc_arb.io.out(i).fire
salloc_out.vid := OHToUInt(salloc_arb.io.chosen_oh(i))
val vc_sel = Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.vc_sel))
val channel_oh = vc_sel.map(_.reduce(_||_)).toSeq
val virt_channel = Mux1H(channel_oh, vc_sel.map(v => OHToUInt(v)).toSeq)
when (salloc_arb.io.out(i).fire) {
salloc_out.out_vid := virt_channel
salloc_out.flit.payload := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.payload))
salloc_out.flit.head := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.head))
salloc_out.flit.tail := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail))
salloc_out.flit.flow := Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.flow))
} .otherwise {
salloc_out.out_vid := DontCare
salloc_out.flit := DontCare
}
salloc_out.flit.virt_channel_id := DontCare // this gets set in the switch
io.out(i).valid := salloc_out.valid
io.out(i).bits.flit := salloc_out.flit
io.out(i).bits.out_virt_channel := salloc_out.out_vid
}
def filterVCSel(sel: MixedVec[Vec[Bool]], srcV: Int) = {
if (virtualChannelParams(srcV).traversable) {
outParams.zipWithIndex.map { case (oP, oI) =>
(0 until oP.nVirtualChannels).map { oV =>
var allow = false
virtualChannelParams(srcV).possibleFlows.foreach { pI =>
allow = allow || routingRelation(
cParam.channelRoutingInfos(srcV),
oP.channelRoutingInfos(oV),
pI
)
}
if (!allow)
sel(oI)(oV) := false.B
}
}
}
}
(0 until nVirtualChannels).map { i =>
if (!virtualChannelParams(i).traversable) states(i) := DontCare
filterVCSel(states(i).vc_sel, i)
}
when (reset.asBool) {
states.foreach(_.g := g_i)
}
}
| module InputBuffer_23( // @[InputUnit.scala:49:7]
input clock, // @[InputUnit.scala:49:7]
input reset, // @[InputUnit.scala:49:7]
input io_enq_0_valid, // @[InputUnit.scala:51:14]
input io_enq_0_bits_head, // @[InputUnit.scala:51:14]
input io_enq_0_bits_tail, // @[InputUnit.scala:51:14]
input [72:0] io_enq_0_bits_payload, // @[InputUnit.scala:51:14]
input [4:0] io_enq_0_bits_virt_channel_id, // @[InputUnit.scala:51:14]
output io_deq_0_bits_head, // @[InputUnit.scala:51:14]
output io_deq_0_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_0_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_1_bits_head, // @[InputUnit.scala:51:14]
output io_deq_1_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_1_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_2_bits_head, // @[InputUnit.scala:51:14]
output io_deq_2_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_2_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_3_bits_head, // @[InputUnit.scala:51:14]
output io_deq_3_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_3_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_4_bits_head, // @[InputUnit.scala:51:14]
output io_deq_4_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_4_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_5_bits_head, // @[InputUnit.scala:51:14]
output io_deq_5_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_5_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_6_bits_head, // @[InputUnit.scala:51:14]
output io_deq_6_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_6_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_7_bits_head, // @[InputUnit.scala:51:14]
output io_deq_7_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_7_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_8_bits_head, // @[InputUnit.scala:51:14]
output io_deq_8_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_8_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_9_bits_head, // @[InputUnit.scala:51:14]
output io_deq_9_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_9_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_10_bits_head, // @[InputUnit.scala:51:14]
output io_deq_10_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_10_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_11_bits_head, // @[InputUnit.scala:51:14]
output io_deq_11_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_11_bits_payload, // @[InputUnit.scala:51:14]
input io_deq_12_ready, // @[InputUnit.scala:51:14]
output io_deq_12_valid, // @[InputUnit.scala:51:14]
output io_deq_12_bits_head, // @[InputUnit.scala:51:14]
output io_deq_12_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_12_bits_payload, // @[InputUnit.scala:51:14]
input io_deq_13_ready, // @[InputUnit.scala:51:14]
output io_deq_13_valid, // @[InputUnit.scala:51:14]
output io_deq_13_bits_head, // @[InputUnit.scala:51:14]
output io_deq_13_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_13_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_14_bits_head, // @[InputUnit.scala:51:14]
output io_deq_14_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_14_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_15_bits_head, // @[InputUnit.scala:51:14]
output io_deq_15_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_15_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_16_bits_head, // @[InputUnit.scala:51:14]
output io_deq_16_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_16_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_17_bits_head, // @[InputUnit.scala:51:14]
output io_deq_17_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_17_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_18_bits_head, // @[InputUnit.scala:51:14]
output io_deq_18_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_18_bits_payload, // @[InputUnit.scala:51:14]
output io_deq_19_bits_head, // @[InputUnit.scala:51:14]
output io_deq_19_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_19_bits_payload, // @[InputUnit.scala:51:14]
input io_deq_20_ready, // @[InputUnit.scala:51:14]
output io_deq_20_valid, // @[InputUnit.scala:51:14]
output io_deq_20_bits_head, // @[InputUnit.scala:51:14]
output io_deq_20_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_20_bits_payload, // @[InputUnit.scala:51:14]
input io_deq_21_ready, // @[InputUnit.scala:51:14]
output io_deq_21_valid, // @[InputUnit.scala:51:14]
output io_deq_21_bits_head, // @[InputUnit.scala:51:14]
output io_deq_21_bits_tail, // @[InputUnit.scala:51:14]
output [72:0] io_deq_21_bits_payload // @[InputUnit.scala:51:14]
);
wire _qs_21_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_20_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_19_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_18_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_17_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_16_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_15_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_14_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_13_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_12_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_11_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_10_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_9_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_8_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_7_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_6_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_5_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_4_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_3_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_2_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_1_io_enq_ready; // @[InputUnit.scala:90:49]
wire _qs_0_io_enq_ready; // @[InputUnit.scala:90:49]
wire [74:0] _mem_ext_R0_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R1_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R2_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R3_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R4_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R5_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R6_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R7_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R8_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R9_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R10_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R11_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R12_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R13_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R14_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R15_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R16_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R17_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R18_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R19_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R20_data; // @[InputUnit.scala:85:18]
wire [74:0] _mem_ext_R21_data; // @[InputUnit.scala:85:18]
reg [3:0] heads_0; // @[InputUnit.scala:86:24]
reg [3:0] heads_1; // @[InputUnit.scala:86:24]
reg [3:0] heads_2; // @[InputUnit.scala:86:24]
reg [3:0] heads_3; // @[InputUnit.scala:86:24]
reg [3:0] heads_4; // @[InputUnit.scala:86:24]
reg [3:0] heads_5; // @[InputUnit.scala:86:24]
reg [3:0] heads_6; // @[InputUnit.scala:86:24]
reg [3:0] heads_7; // @[InputUnit.scala:86:24]
reg [3:0] heads_8; // @[InputUnit.scala:86:24]
reg [3:0] heads_9; // @[InputUnit.scala:86:24]
reg [3:0] heads_10; // @[InputUnit.scala:86:24]
reg [3:0] heads_11; // @[InputUnit.scala:86:24]
reg [3:0] heads_12; // @[InputUnit.scala:86:24]
reg [3:0] heads_13; // @[InputUnit.scala:86:24]
reg [3:0] heads_14; // @[InputUnit.scala:86:24]
reg [3:0] heads_15; // @[InputUnit.scala:86:24]
reg [3:0] heads_16; // @[InputUnit.scala:86:24]
reg [3:0] heads_17; // @[InputUnit.scala:86:24]
reg [3:0] heads_18; // @[InputUnit.scala:86:24]
reg [3:0] heads_19; // @[InputUnit.scala:86:24]
reg [3:0] heads_20; // @[InputUnit.scala:86:24]
reg [3:0] heads_21; // @[InputUnit.scala:86:24]
reg [3:0] tails_0; // @[InputUnit.scala:87:24]
reg [3:0] tails_1; // @[InputUnit.scala:87:24]
reg [3:0] tails_2; // @[InputUnit.scala:87:24]
reg [3:0] tails_3; // @[InputUnit.scala:87:24]
reg [3:0] tails_4; // @[InputUnit.scala:87:24]
reg [3:0] tails_5; // @[InputUnit.scala:87:24]
reg [3:0] tails_6; // @[InputUnit.scala:87:24]
reg [3:0] tails_7; // @[InputUnit.scala:87:24]
reg [3:0] tails_8; // @[InputUnit.scala:87:24]
reg [3:0] tails_9; // @[InputUnit.scala:87:24]
reg [3:0] tails_10; // @[InputUnit.scala:87:24]
reg [3:0] tails_11; // @[InputUnit.scala:87:24]
reg [3:0] tails_12; // @[InputUnit.scala:87:24]
reg [3:0] tails_13; // @[InputUnit.scala:87:24]
reg [3:0] tails_14; // @[InputUnit.scala:87:24]
reg [3:0] tails_15; // @[InputUnit.scala:87:24]
reg [3:0] tails_16; // @[InputUnit.scala:87:24]
reg [3:0] tails_17; // @[InputUnit.scala:87:24]
reg [3:0] tails_18; // @[InputUnit.scala:87:24]
reg [3:0] tails_19; // @[InputUnit.scala:87:24]
reg [3:0] tails_20; // @[InputUnit.scala:87:24]
reg [3:0] tails_21; // @[InputUnit.scala:87:24]
wire _tails_T_66 = io_enq_0_bits_virt_channel_id == 5'h0; // @[Mux.scala:32:36]
wire _tails_T_67 = io_enq_0_bits_virt_channel_id == 5'h1; // @[Mux.scala:32:36]
wire _tails_T_68 = io_enq_0_bits_virt_channel_id == 5'h2; // @[Mux.scala:32:36]
wire _tails_T_69 = io_enq_0_bits_virt_channel_id == 5'h3; // @[Mux.scala:32:36]
wire _tails_T_70 = io_enq_0_bits_virt_channel_id == 5'h4; // @[Mux.scala:32:36]
wire _tails_T_71 = io_enq_0_bits_virt_channel_id == 5'h5; // @[Mux.scala:32:36]
wire _tails_T_72 = io_enq_0_bits_virt_channel_id == 5'h6; // @[Mux.scala:32:36]
wire _tails_T_73 = io_enq_0_bits_virt_channel_id == 5'h7; // @[Mux.scala:32:36]
wire _tails_T_74 = io_enq_0_bits_virt_channel_id == 5'h8; // @[Mux.scala:32:36]
wire _tails_T_75 = io_enq_0_bits_virt_channel_id == 5'h9; // @[Mux.scala:32:36]
wire _tails_T_76 = io_enq_0_bits_virt_channel_id == 5'hA; // @[Mux.scala:32:36]
wire _tails_T_77 = io_enq_0_bits_virt_channel_id == 5'hB; // @[Mux.scala:32:36]
wire _tails_T_78 = io_enq_0_bits_virt_channel_id == 5'hC; // @[Mux.scala:32:36]
wire _tails_T_79 = io_enq_0_bits_virt_channel_id == 5'hD; // @[Mux.scala:32:36]
wire _tails_T_80 = io_enq_0_bits_virt_channel_id == 5'hE; // @[Mux.scala:32:36]
wire _tails_T_81 = io_enq_0_bits_virt_channel_id == 5'hF; // @[Mux.scala:32:36]
wire _tails_T_82 = io_enq_0_bits_virt_channel_id == 5'h10; // @[Mux.scala:32:36]
wire _tails_T_83 = io_enq_0_bits_virt_channel_id == 5'h11; // @[Mux.scala:32:36]
wire _tails_T_84 = io_enq_0_bits_virt_channel_id == 5'h12; // @[Mux.scala:32:36]
wire _tails_T_85 = io_enq_0_bits_virt_channel_id == 5'h13; // @[Mux.scala:32:36]
wire _tails_T_86 = io_enq_0_bits_virt_channel_id == 5'h14; // @[Mux.scala:32:36]
wire _tails_T_87 = io_enq_0_bits_virt_channel_id == 5'h15; // @[Mux.scala:32:36]
wire direct_to_q = (_tails_T_66 & _qs_0_io_enq_ready | _tails_T_67 & _qs_1_io_enq_ready | _tails_T_68 & _qs_2_io_enq_ready | _tails_T_69 & _qs_3_io_enq_ready | _tails_T_70 & _qs_4_io_enq_ready | _tails_T_71 & _qs_5_io_enq_ready | _tails_T_72 & _qs_6_io_enq_ready | _tails_T_73 & _qs_7_io_enq_ready | _tails_T_74 & _qs_8_io_enq_ready | _tails_T_75 & _qs_9_io_enq_ready | _tails_T_76 & _qs_10_io_enq_ready | _tails_T_77 & _qs_11_io_enq_ready | _tails_T_78 & _qs_12_io_enq_ready | _tails_T_79 & _qs_13_io_enq_ready | _tails_T_80 & _qs_14_io_enq_ready | _tails_T_81 & _qs_15_io_enq_ready | _tails_T_82 & _qs_16_io_enq_ready | _tails_T_83 & _qs_17_io_enq_ready | _tails_T_84 & _qs_18_io_enq_ready | _tails_T_85 & _qs_19_io_enq_ready | _tails_T_86 & _qs_20_io_enq_ready | _tails_T_87 & _qs_21_io_enq_ready) & (_tails_T_66 & heads_0 == tails_0 | _tails_T_67 & heads_1 == tails_1 | _tails_T_68 & heads_2 == tails_2 | _tails_T_69 & heads_3 == tails_3 | _tails_T_70 & heads_4 == tails_4 | _tails_T_71 & heads_5 == tails_5 | _tails_T_72 & heads_6 == tails_6 | _tails_T_73 & heads_7 == tails_7 | _tails_T_74 & heads_8 == tails_8 | _tails_T_75 & heads_9 == tails_9 | _tails_T_76 & heads_10 == tails_10 | _tails_T_77 & heads_11 == tails_11 | _tails_T_78 & heads_12 == tails_12 | _tails_T_79 & heads_13 == tails_13 | _tails_T_80 & heads_14 == tails_14 | _tails_T_81 & heads_15 == tails_15 | _tails_T_82 & heads_16 == tails_16 | _tails_T_83 & heads_17 == tails_17 | _tails_T_84 & heads_18 == tails_18 | _tails_T_85 & heads_19 == tails_19 | _tails_T_86 & heads_20 == tails_20 | _tails_T_87 & heads_21 == tails_21); // @[Mux.scala:30:73, :32:36]
wire mem_MPORT_en = io_enq_0_valid & ~direct_to_q; // @[InputUnit.scala:96:62, :100:{27,30}]
wire [31:0][3:0] _GEN = {{tails_0}, {tails_0}, {tails_0}, {tails_0}, {tails_0}, {tails_0}, {tails_0}, {tails_0}, {tails_0}, {tails_0}, {tails_21}, {tails_20}, {tails_19}, {tails_18}, {tails_17}, {tails_16}, {tails_15}, {tails_14}, {tails_13}, {tails_12}, {tails_11}, {tails_10}, {tails_9}, {tails_8}, {tails_7}, {tails_6}, {tails_5}, {tails_4}, {tails_3}, {tails_2}, {tails_1}, {tails_0}}; // @[InputUnit.scala:87:24, :102:16]
wire _GEN_0 = io_enq_0_bits_virt_channel_id == 5'h0; // @[InputUnit.scala:103:45]
wire _GEN_1 = io_enq_0_bits_virt_channel_id == 5'h1; // @[InputUnit.scala:103:45]
wire _GEN_2 = io_enq_0_bits_virt_channel_id == 5'h2; // @[InputUnit.scala:103:45]
wire _GEN_3 = io_enq_0_bits_virt_channel_id == 5'h3; // @[InputUnit.scala:103:45]
wire _GEN_4 = io_enq_0_bits_virt_channel_id == 5'h4; // @[InputUnit.scala:103:45]
wire _GEN_5 = io_enq_0_bits_virt_channel_id == 5'h5; // @[InputUnit.scala:103:45]
wire _GEN_6 = io_enq_0_bits_virt_channel_id == 5'h6; // @[InputUnit.scala:103:45]
wire _GEN_7 = io_enq_0_bits_virt_channel_id == 5'h7; // @[InputUnit.scala:103:45]
wire _GEN_8 = io_enq_0_bits_virt_channel_id == 5'h8; // @[InputUnit.scala:103:45]
wire _GEN_9 = io_enq_0_bits_virt_channel_id == 5'h9; // @[InputUnit.scala:103:45]
wire _GEN_10 = io_enq_0_bits_virt_channel_id == 5'hA; // @[InputUnit.scala:103:45]
wire _GEN_11 = io_enq_0_bits_virt_channel_id == 5'hB; // @[InputUnit.scala:103:45]
wire _GEN_12 = io_enq_0_bits_virt_channel_id == 5'hC; // @[InputUnit.scala:103:45]
wire _GEN_13 = io_enq_0_bits_virt_channel_id == 5'hD; // @[InputUnit.scala:103:45]
wire _GEN_14 = io_enq_0_bits_virt_channel_id == 5'hE; // @[InputUnit.scala:103:45]
wire _GEN_15 = io_enq_0_bits_virt_channel_id == 5'hF; // @[InputUnit.scala:103:45]
wire _GEN_16 = io_enq_0_bits_virt_channel_id == 5'h10; // @[InputUnit.scala:103:45]
wire _GEN_17 = io_enq_0_bits_virt_channel_id == 5'h11; // @[InputUnit.scala:103:45]
wire _GEN_18 = io_enq_0_bits_virt_channel_id == 5'h12; // @[InputUnit.scala:103:45]
wire _GEN_19 = io_enq_0_bits_virt_channel_id == 5'h13; // @[InputUnit.scala:103:45]
wire _GEN_20 = io_enq_0_bits_virt_channel_id == 5'h14; // @[InputUnit.scala:103:45]
wire _GEN_21 = io_enq_0_bits_virt_channel_id == 5'h15; // @[InputUnit.scala:103:45]
wire _GEN_22 = io_enq_0_valid & direct_to_q; // @[InputUnit.scala:96:62, :107:34]
wire can_to_q_0 = heads_0 != tails_0 & _qs_0_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_1 = heads_1 != tails_1 & _qs_1_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_2 = heads_2 != tails_2 & _qs_2_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_3 = heads_3 != tails_3 & _qs_3_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_4 = heads_4 != tails_4 & _qs_4_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_5 = heads_5 != tails_5 & _qs_5_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_6 = heads_6 != tails_6 & _qs_6_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_7 = heads_7 != tails_7 & _qs_7_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_8 = heads_8 != tails_8 & _qs_8_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_9 = heads_9 != tails_9 & _qs_9_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_10 = heads_10 != tails_10 & _qs_10_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_11 = heads_11 != tails_11 & _qs_11_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_12 = heads_12 != tails_12 & _qs_12_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_13 = heads_13 != tails_13 & _qs_13_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_14 = heads_14 != tails_14 & _qs_14_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_15 = heads_15 != tails_15 & _qs_15_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_16 = heads_16 != tails_16 & _qs_16_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_17 = heads_17 != tails_17 & _qs_17_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_18 = heads_18 != tails_18 & _qs_18_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_19 = heads_19 != tails_19 & _qs_19_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_20 = heads_20 != tails_20 & _qs_20_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire can_to_q_21 = heads_21 != tails_21 & _qs_21_io_enq_ready; // @[InputUnit.scala:86:24, :87:24, :88:49, :90:49, :117:{60,70}]
wire [21:0] to_q_oh_enc = can_to_q_0 ? 22'h1 : can_to_q_1 ? 22'h2 : can_to_q_2 ? 22'h4 : can_to_q_3 ? 22'h8 : can_to_q_4 ? 22'h10 : can_to_q_5 ? 22'h20 : can_to_q_6 ? 22'h40 : can_to_q_7 ? 22'h80 : can_to_q_8 ? 22'h100 : can_to_q_9 ? 22'h200 : can_to_q_10 ? 22'h400 : can_to_q_11 ? 22'h800 : can_to_q_12 ? 22'h1000 : can_to_q_13 ? 22'h2000 : can_to_q_14 ? 22'h4000 : can_to_q_15 ? 22'h8000 : can_to_q_16 ? 22'h10000 : can_to_q_17 ? 22'h20000 : can_to_q_18 ? 22'h40000 : can_to_q_19 ? 22'h80000 : can_to_q_20 ? 22'h100000 : {can_to_q_21, 21'h0}; // @[Mux.scala:50:70]
wire _GEN_23 = can_to_q_0 | can_to_q_1 | can_to_q_2 | can_to_q_3 | can_to_q_4 | can_to_q_5 | can_to_q_6 | can_to_q_7 | can_to_q_8 | can_to_q_9 | can_to_q_10 | can_to_q_11 | can_to_q_12 | can_to_q_13 | can_to_q_14 | can_to_q_15 | can_to_q_16 | can_to_q_17 | can_to_q_18 | can_to_q_19 | can_to_q_20 | can_to_q_21; // @[package.scala:81:59]
wire [3:0] head = (to_q_oh_enc[0] ? heads_0 : 4'h0) | (to_q_oh_enc[1] ? heads_1 : 4'h0) | (to_q_oh_enc[2] ? heads_2 : 4'h0) | (to_q_oh_enc[3] ? heads_3 : 4'h0) | (to_q_oh_enc[4] ? heads_4 : 4'h0) | (to_q_oh_enc[5] ? heads_5 : 4'h0) | (to_q_oh_enc[6] ? heads_6 : 4'h0) | (to_q_oh_enc[7] ? heads_7 : 4'h0) | (to_q_oh_enc[8] ? heads_8 : 4'h0) | (to_q_oh_enc[9] ? heads_9 : 4'h0) | (to_q_oh_enc[10] ? heads_10 : 4'h0) | (to_q_oh_enc[11] ? heads_11 : 4'h0) | (to_q_oh_enc[12] ? heads_12 : 4'h0) | (to_q_oh_enc[13] ? heads_13 : 4'h0) | (to_q_oh_enc[14] ? heads_14 : 4'h0) | (to_q_oh_enc[15] ? heads_15 : 4'h0) | (to_q_oh_enc[16] ? heads_16 : 4'h0) | (to_q_oh_enc[17] ? heads_17 : 4'h0) | (to_q_oh_enc[18] ? heads_18 : 4'h0) | (to_q_oh_enc[19] ? heads_19 : 4'h0) | (to_q_oh_enc[20] ? heads_20 : 4'h0) | (to_q_oh_enc[21] ? heads_21 : 4'h0); // @[OneHot.scala:83:30]
wire _GEN_24 = _GEN_23 & to_q_oh_enc[0]; // @[OneHot.scala:83:30]
wire _GEN_25 = _GEN_23 & to_q_oh_enc[1]; // @[OneHot.scala:83:30]
wire _GEN_26 = _GEN_23 & to_q_oh_enc[2]; // @[OneHot.scala:83:30]
wire _GEN_27 = _GEN_23 & to_q_oh_enc[3]; // @[OneHot.scala:83:30]
wire _GEN_28 = _GEN_23 & to_q_oh_enc[4]; // @[OneHot.scala:83:30]
wire _GEN_29 = _GEN_23 & to_q_oh_enc[5]; // @[OneHot.scala:83:30]
wire _GEN_30 = _GEN_23 & to_q_oh_enc[6]; // @[OneHot.scala:83:30]
wire _GEN_31 = _GEN_23 & to_q_oh_enc[7]; // @[OneHot.scala:83:30]
wire _GEN_32 = _GEN_23 & to_q_oh_enc[8]; // @[OneHot.scala:83:30]
wire _GEN_33 = _GEN_23 & to_q_oh_enc[9]; // @[OneHot.scala:83:30]
wire _GEN_34 = _GEN_23 & to_q_oh_enc[10]; // @[OneHot.scala:83:30]
wire _GEN_35 = _GEN_23 & to_q_oh_enc[11]; // @[OneHot.scala:83:30]
wire _GEN_36 = _GEN_23 & to_q_oh_enc[12]; // @[OneHot.scala:83:30]
wire _GEN_37 = _GEN_23 & to_q_oh_enc[13]; // @[OneHot.scala:83:30]
wire _GEN_38 = _GEN_23 & to_q_oh_enc[14]; // @[OneHot.scala:83:30]
wire _GEN_39 = _GEN_23 & to_q_oh_enc[15]; // @[OneHot.scala:83:30]
wire _GEN_40 = _GEN_23 & to_q_oh_enc[16]; // @[OneHot.scala:83:30]
wire _GEN_41 = _GEN_23 & to_q_oh_enc[17]; // @[OneHot.scala:83:30]
wire _GEN_42 = _GEN_23 & to_q_oh_enc[18]; // @[OneHot.scala:83:30]
wire _GEN_43 = _GEN_23 & to_q_oh_enc[19]; // @[OneHot.scala:83:30]
wire _GEN_44 = _GEN_23 & to_q_oh_enc[20]; // @[OneHot.scala:83:30]
wire _GEN_45 = _GEN_23 & to_q_oh_enc[21]; // @[OneHot.scala:83:30]
wire [3:0] _tails_T_133 = _GEN[io_enq_0_bits_virt_channel_id] == ({1'h0, {1'h0, {2{_tails_T_78}}} | {3{_tails_T_79}}} | (_tails_T_86 ? 4'hB : 4'h0) | {4{_tails_T_87}}) ? {_tails_T_86, _tails_T_79, 2'h0} | (_tails_T_87 ? 4'hC : 4'h0) : _GEN[io_enq_0_bits_virt_channel_id] + 4'h1; // @[Mux.scala:30:73, :32:36]
wire [14:0] _to_q_T_2 = {10'h0, to_q_oh_enc[21:17]} | to_q_oh_enc[15:1]; // @[OneHot.scala:31:18, :32:28]
wire [6:0] _to_q_T_4 = _to_q_T_2[14:8] | _to_q_T_2[6:0]; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [2:0] _to_q_T_6 = _to_q_T_4[6:4] | _to_q_T_4[2:0]; // @[OneHot.scala:30:18, :31:18, :32:28]
wire _to_q_T_8 = _to_q_T_6[2] | _to_q_T_6[0]; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [4:0] to_q = {|(to_q_oh_enc[21:16]), |(_to_q_T_2[14:7]), |(_to_q_T_4[6:3]), |(_to_q_T_6[2:1]), _to_q_T_8}; // @[OneHot.scala:30:18, :32:{10,14,28}]
wire [3:0] _heads_T_89 = head == ({1'h0, {1'h0, {2{to_q_oh_enc[12]}}} | {3{to_q_oh_enc[13]}}} | (to_q_oh_enc[20] ? 4'hB : 4'h0) | {4{to_q_oh_enc[21]}}) ? {to_q_oh_enc[20], to_q_oh_enc[13], 2'h0} | (to_q_oh_enc[21] ? 4'hC : 4'h0) : head + 4'h1; // @[OneHot.scala:83:30]
always @(posedge clock) begin // @[InputUnit.scala:49:7]
if (reset) begin // @[InputUnit.scala:49:7]
heads_0 <= 4'h0; // @[InputUnit.scala:86:24]
heads_1 <= 4'h0; // @[InputUnit.scala:86:24]
heads_2 <= 4'h0; // @[InputUnit.scala:86:24]
heads_3 <= 4'h0; // @[InputUnit.scala:86:24]
heads_4 <= 4'h0; // @[InputUnit.scala:86:24]
heads_5 <= 4'h0; // @[InputUnit.scala:86:24]
heads_6 <= 4'h0; // @[InputUnit.scala:86:24]
heads_7 <= 4'h0; // @[InputUnit.scala:86:24]
heads_8 <= 4'h0; // @[InputUnit.scala:86:24]
heads_9 <= 4'h0; // @[InputUnit.scala:86:24]
heads_10 <= 4'h0; // @[InputUnit.scala:86:24]
heads_11 <= 4'h0; // @[InputUnit.scala:86:24]
heads_12 <= 4'h0; // @[InputUnit.scala:86:24]
heads_13 <= 4'h4; // @[InputUnit.scala:86:24]
heads_14 <= 4'h0; // @[InputUnit.scala:86:24]
heads_15 <= 4'h0; // @[InputUnit.scala:86:24]
heads_16 <= 4'h0; // @[InputUnit.scala:86:24]
heads_17 <= 4'h0; // @[InputUnit.scala:86:24]
heads_18 <= 4'h0; // @[InputUnit.scala:86:24]
heads_19 <= 4'h0; // @[InputUnit.scala:86:24]
heads_20 <= 4'h8; // @[InputUnit.scala:86:24]
heads_21 <= 4'hC; // @[InputUnit.scala:86:24]
tails_0 <= 4'h0; // @[InputUnit.scala:87:24]
tails_1 <= 4'h0; // @[InputUnit.scala:87:24]
tails_2 <= 4'h0; // @[InputUnit.scala:87:24]
tails_3 <= 4'h0; // @[InputUnit.scala:87:24]
tails_4 <= 4'h0; // @[InputUnit.scala:87:24]
tails_5 <= 4'h0; // @[InputUnit.scala:87:24]
tails_6 <= 4'h0; // @[InputUnit.scala:87:24]
tails_7 <= 4'h0; // @[InputUnit.scala:87:24]
tails_8 <= 4'h0; // @[InputUnit.scala:87:24]
tails_9 <= 4'h0; // @[InputUnit.scala:87:24]
tails_10 <= 4'h0; // @[InputUnit.scala:87:24]
tails_11 <= 4'h0; // @[InputUnit.scala:87:24]
tails_12 <= 4'h0; // @[InputUnit.scala:87:24]
tails_13 <= 4'h4; // @[InputUnit.scala:87:24]
tails_14 <= 4'h0; // @[InputUnit.scala:87:24]
tails_15 <= 4'h0; // @[InputUnit.scala:87:24]
tails_16 <= 4'h0; // @[InputUnit.scala:87:24]
tails_17 <= 4'h0; // @[InputUnit.scala:87:24]
tails_18 <= 4'h0; // @[InputUnit.scala:87:24]
tails_19 <= 4'h0; // @[InputUnit.scala:87:24]
tails_20 <= 4'h8; // @[InputUnit.scala:87:24]
tails_21 <= 4'hC; // @[InputUnit.scala:87:24]
end
else begin // @[InputUnit.scala:49:7]
if (_GEN_23 & {to_q_oh_enc[21:16], |(_to_q_T_2[14:7]), |(_to_q_T_4[6:3]), |(_to_q_T_6[2:1]), _to_q_T_8} == 10'h0) // @[OneHot.scala:30:18, :32:{10,14,28}]
heads_0 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h1) // @[OneHot.scala:32:10]
heads_1 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h2) // @[OneHot.scala:32:10]
heads_2 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h3) // @[OneHot.scala:32:10]
heads_3 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h4) // @[OneHot.scala:32:10]
heads_4 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h5) // @[OneHot.scala:32:10]
heads_5 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h6) // @[OneHot.scala:32:10]
heads_6 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h7) // @[OneHot.scala:32:10]
heads_7 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h8) // @[OneHot.scala:32:10]
heads_8 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h9) // @[OneHot.scala:32:10]
heads_9 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'hA) // @[OneHot.scala:32:10]
heads_10 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'hB) // @[OneHot.scala:32:10]
heads_11 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'hC) // @[OneHot.scala:32:10]
heads_12 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'hD) // @[OneHot.scala:32:10]
heads_13 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'hE) // @[OneHot.scala:32:10]
heads_14 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'hF) // @[OneHot.scala:32:10]
heads_15 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h10) // @[OneHot.scala:32:10]
heads_16 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h11) // @[OneHot.scala:32:10]
heads_17 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h12) // @[OneHot.scala:32:10]
heads_18 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h13) // @[OneHot.scala:32:10]
heads_19 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h14) // @[OneHot.scala:32:10]
heads_20 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (_GEN_23 & to_q == 5'h15) // @[OneHot.scala:32:10]
heads_21 <= _heads_T_89; // @[InputUnit.scala:86:24, :122:27]
if (mem_MPORT_en & _GEN_0) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_0 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_1) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_1 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_2) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_2 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_3) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_3 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_4) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_4 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_5) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_5 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_6) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_6 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_7) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_7 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_8) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_8 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_9) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_9 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_10) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_10 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_11) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_11 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_12) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_12 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_13) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_13 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_14) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_14 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_15) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_15 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_16) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_16 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_17) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_17 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_18) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_18 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_19) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_19 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_20) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_20 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
if (mem_MPORT_en & _GEN_21) // @[InputUnit.scala:87:24, :100:{27,44}, :103:45]
tails_21 <= _tails_T_133; // @[InputUnit.scala:87:24, :103:51]
end
always @(posedge) |
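The generated module above keeps per-virtual-channel head and tail pointers (heads_*, tails_*) into one shared flit buffer: a flit bypasses the buffer (direct_to_q) when its channel is empty and the per-channel output queue is ready, and otherwise the channel's tail pointer advances, wrapping within that channel's region of the buffer. A minimal Chisel sketch of such a wrapping pointer update, using illustrative base/depth parameters that are not names from the source:

import chisel3._

// Sketch only: advance a pointer through a per-channel region
// [base, base + depth) of a shared buffer, wrapping back to base at the end.
// All names here are illustrative, not taken from the generated module.
class WrapPtr(w: Int) extends Module {
  val io = IO(new Bundle {
    val ptr   = Input(UInt(w.W))
    val base  = Input(UInt(w.W))
    val depth = Input(UInt(w.W))
    val next  = Output(UInt(w.W))
  })
  io.next := Mux(io.ptr === io.base + io.depth - 1.U, io.base, io.ptr + 1.U)
}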
Generate the Verilog code corresponding to the following Chisel files.
File EgressUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{FlowRoutingBundle}
class EgressUnit(coupleSAVA: Boolean, combineSAST: Boolean, inParams: Seq[ChannelParams], ingressParams: Seq[IngressChannelParams], cParam: EgressChannelParams)
(implicit p: Parameters) extends AbstractOutputUnit(inParams, ingressParams, cParam)(p) {
class EgressUnitIO extends AbstractOutputUnitIO(inParams, ingressParams, cParam) {
val out = Decoupled(new EgressFlit(cParam.payloadBits))
}
val io = IO(new EgressUnitIO)
val channel_empty = RegInit(true.B)
val flow = Reg(new FlowRoutingBundle)
val q = Module(new Queue(new EgressFlit(cParam.payloadBits), 3 - (if (combineSAST) 1 else 0), flow=true))
q.io.enq.valid := io.in(0).valid
q.io.enq.bits.head := io.in(0).bits.head
q.io.enq.bits.tail := io.in(0).bits.tail
val flows = cParam.possibleFlows.toSeq
if (flows.size == 0) {
q.io.enq.bits.ingress_id := 0.U(1.W)
} else {
q.io.enq.bits.ingress_id := Mux1H(
flows.map(f => (f.ingressNode.U === io.in(0).bits.flow.ingress_node &&
f.ingressNodeId.U === io.in(0).bits.flow.ingress_node_id)),
flows.map(f => f.ingressId.U(ingressIdBits.W))
)
}
q.io.enq.bits.payload := io.in(0).bits.payload
io.out <> q.io.deq
assert(!(q.io.enq.valid && !q.io.enq.ready))
io.credit_available(0) := q.io.count === 0.U
io.channel_status(0).occupied := !channel_empty
io.channel_status(0).flow := flow
when (io.credit_alloc(0).alloc && io.credit_alloc(0).tail) {
channel_empty := true.B
if (coupleSAVA) io.channel_status(0).occupied := false.B
}
when (io.allocs(0).alloc) {
channel_empty := false.B
flow := io.allocs(0).flow
}
}
| module EgressUnit_18( // @[EgressUnit.scala:12:7]
input clock, // @[EgressUnit.scala:12:7]
input reset, // @[EgressUnit.scala:12:7]
input io_in_0_valid, // @[EgressUnit.scala:18:14]
input io_in_0_bits_head, // @[EgressUnit.scala:18:14]
input io_in_0_bits_tail, // @[EgressUnit.scala:18:14]
input [72:0] io_in_0_bits_payload, // @[EgressUnit.scala:18:14]
input [5:0] io_in_0_bits_flow_ingress_node, // @[EgressUnit.scala:18:14]
input [2:0] io_in_0_bits_flow_ingress_node_id, // @[EgressUnit.scala:18:14]
output io_credit_available_0, // @[EgressUnit.scala:18:14]
output io_channel_status_0_occupied, // @[EgressUnit.scala:18:14]
input io_allocs_0_alloc, // @[EgressUnit.scala:18:14]
input io_credit_alloc_0_alloc, // @[EgressUnit.scala:18:14]
input io_credit_alloc_0_tail, // @[EgressUnit.scala:18:14]
input io_out_ready, // @[EgressUnit.scala:18:14]
output io_out_valid, // @[EgressUnit.scala:18:14]
output io_out_bits_head, // @[EgressUnit.scala:18:14]
output io_out_bits_tail, // @[EgressUnit.scala:18:14]
output [72:0] io_out_bits_payload // @[EgressUnit.scala:18:14]
);
wire _q_io_enq_ready; // @[EgressUnit.scala:22:17]
wire [1:0] _q_io_count; // @[EgressUnit.scala:22:17]
reg channel_empty; // @[EgressUnit.scala:20:30]
wire _q_io_enq_bits_ingress_id_T_4 = io_in_0_bits_flow_ingress_node_id == 3'h0; // @[EgressUnit.scala:32:27] |
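The EgressUnit above recovers an ingress id for each arriving flit by comparing the flit's flow fields against every statically possible flow and feeding the match bits into a Mux1H. A minimal sketch of that one-hot lookup pattern in isolation; the candidates parameter and module name are illustrative, not taken from the source:

import chisel3._
import chisel3.util._

// Sketch only: map a matched key to a constant id with Mux1H, in the style
// of the lookup over possibleFlows above. Each candidate pairs a node number
// with the id to emit; both values are illustrative.
class FlowLookup(candidates: Seq[(Int, Int)], idBits: Int) extends Module {
  val io = IO(new Bundle {
    val node = Input(UInt(6.W))
    val id   = Output(UInt(idBits.W))
  })
  io.id := Mux1H(
    candidates.map { case (n, _) => io.node === n.U },
    candidates.map { case (_, i) => i.U(idBits.W) }
  )
}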
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unnecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_330( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19]
wire _sync_2_T = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h1; // @[SynchronizerReg.scala:51:87, :54:22, :68:19]
end
always @(posedge, posedge) |
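The module above is one of the 1-bit primitive chains that AsyncResetSynchronizerShiftReg builds per input bit; per the desiredName strings in the file, a 1-bit input with sync = 3 and init = 0 yields a _d3_i0 primitive like this one. A minimal usage sketch of the companion object shown above, with illustrative module and signal names:

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

// Sketch only: synchronize one asynchronous input bit through a 3-deep
// async-reset flop chain; "bit_sync" is just a suggested instance name.
class BitSynchronizer extends Module {
  val io = IO(new Bundle {
    val asyncIn = Input(Bool())
    val syncOut = Output(Bool())
  })
  io.syncOut := AsyncResetSynchronizerShiftReg(io.asyncIn, 3, Some("bit_sync"))
}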
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unnecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_412( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
end
always @(posedge, posedge) |
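The same file also defines ClockCrossingReg, a single-deep, enable-gated capture flop for values crossing clock domains, rather than a multi-stage synchronizer. A minimal usage sketch of its companion object, with illustrative widths and names:

import chisel3._
import freechips.rocketchip.util.ClockCrossingReg

// Sketch only: capture an 8-bit value when en is high; doInit = false picks
// the RegEnable variant with no initial value. Names are illustrative.
class CaptureReg extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(8.W))
    val en  = Input(Bool())
    val out = Output(UInt(8.W))
  })
  io.out := ClockCrossingReg(io.in, en = io.en, doInit = false, name = Some("cdc_capture"))
}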
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unnecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
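// Illustrative usage sketch (not part of the original file; `payload` and `capture_en`
// are hypothetical signals in the destination domain, with `capture_en` assumed to be
// already synchronized):
//
//   val captured = ClockCrossingReg(payload, en = capture_en, doInit = true)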
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_275( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
end
end // always @(posedge, posedge)
assign io_q = io_q_0;
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
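// Illustrative usage sketch (assumption, mirroring how TLBuffer is used elsewhere in this
// document, e.g. by PeripheryBus): a buffer is spliced into a TileLink edge with the
// diplomatic connection operators, or several are chained for extra pipelining.
//
//   slaveNode := TLBuffer(BufferParams.default) := masterNode
//   val buffered = TLBuffer.chainNode(2, Some("example_buffers"))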
File AtomicAutomata.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes}
import freechips.rocketchip.util.leftOR
import scala.math.{min,max}
// Ensures that all downstream RW managers support Atomic operations.
// If !passthrough, intercept all Atomics. Otherwise, only intercept those unsupported downstream.
class TLAtomicAutomata(logical: Boolean = true, arithmetic: Boolean = true, concurrency: Int = 1, passthrough: Boolean = true)(implicit p: Parameters) extends LazyModule
{
require (concurrency >= 1)
val node = TLAdapterNode(
managerFn = { case mp => mp.v1copy(managers = mp.managers.map { m =>
val ourSupport = TransferSizes(1, mp.beatBytes)
def widen(x: TransferSizes) = if (passthrough && x.min <= 2*mp.beatBytes) TransferSizes(1, max(mp.beatBytes, x.max)) else ourSupport
val canDoit = m.supportsPutFull.contains(ourSupport) && m.supportsGet.contains(ourSupport)
// Blow up if there are devices to which we cannot add Atomics, because their R|W are too inflexible
require (!m.supportsPutFull || !m.supportsGet || canDoit, s"${m.name} has $ourSupport, needed PutFull(${m.supportsPutFull}) or Get(${m.supportsGet})")
m.v1copy(
supportsArithmetic = if (!arithmetic || !canDoit) m.supportsArithmetic else widen(m.supportsArithmetic),
supportsLogical = if (!logical || !canDoit) m.supportsLogical else widen(m.supportsLogical),
mayDenyGet = m.mayDenyGet || m.mayDenyPut)
})})
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
val managers = edgeOut.manager.managers
val beatBytes = edgeOut.manager.beatBytes
// To which managers are we adding atomic support?
val ourSupport = TransferSizes(1, beatBytes)
val managersNeedingHelp = managers.filter { m =>
m.supportsPutFull.contains(ourSupport) &&
m.supportsGet.contains(ourSupport) &&
((logical && !m.supportsLogical .contains(ourSupport)) ||
(arithmetic && !m.supportsArithmetic.contains(ourSupport)) ||
!passthrough) // we will do atomics for everyone we can
}
// Managers that need help with atomics must necessarily have this node as the root of a tree in the node graph.
// (But they must also ensure no sideband operations can get between the read and write.)
val violations = managersNeedingHelp.flatMap(_.findTreeViolation()).map { node => (node.name, node.inputs.map(_._1.name)) }
require(violations.isEmpty,
s"AtomicAutomata can only help nodes for which it is at the root of a diplomatic node tree," +
"but the following violations were found:\n" +
violations.map(v => s"(${v._1} has parents ${v._2})").mkString("\n"))
// We cannot add atomics to a non-FIFO manager
managersNeedingHelp foreach { m => require (m.fifoId.isDefined) }
// We need to preserve FIFO semantics across FIFO domains, not managers
// Suppose you have Put(42) Atomic(+1) both inflight; valid results: 42 or 43
// If we allow Put(42) Get() Put(+1) concurrent; valid results: 42 43 OR undef
// Making non-FIFO work requires waiting for all Acks to come back (=> use FIFOFixer)
val domainsNeedingHelp = managersNeedingHelp.map(_.fifoId.get).distinct
// Don't overprovision the CAM
val camSize = min(domainsNeedingHelp.size, concurrency)
// Compact the fifoIds to only those we care about
def camFifoId(m: TLSlaveParameters) = m.fifoId.map(id => max(0, domainsNeedingHelp.indexOf(id))).getOrElse(0)
// CAM entry state machine
val FREE = 0.U // unused waiting on Atomic from A
val GET = 3.U // Get sent down A waiting on AccessDataAck from D
val AMO = 2.U // AccessDataAck sent up D waiting for A availability
val ACK = 1.U // Put sent down A waiting for PutAck from D
val params = TLAtomicAutomata.CAMParams(out.a.bits.params, domainsNeedingHelp.size)
// Do we need to do anything at all?
if (camSize > 0) {
val initval = Wire(new TLAtomicAutomata.CAM_S(params))
initval.state := FREE
val cam_s = RegInit(VecInit.fill(camSize)(initval))
val cam_a = Reg(Vec(camSize, new TLAtomicAutomata.CAM_A(params)))
val cam_d = Reg(Vec(camSize, new TLAtomicAutomata.CAM_D(params)))
val cam_free = cam_s.map(_.state === FREE)
val cam_amo = cam_s.map(_.state === AMO)
val cam_abusy = cam_s.map(e => e.state === GET || e.state === AMO) // A is blocked
val cam_dmatch = cam_s.map(e => e.state =/= FREE) // D should inspect these entries
// Can the manager already handle this message?
val a_address = edgeIn.address(in.a.bits)
val a_size = edgeIn.size(in.a.bits)
val a_canLogical = passthrough.B && edgeOut.manager.supportsLogicalFast (a_address, a_size)
val a_canArithmetic = passthrough.B && edgeOut.manager.supportsArithmeticFast(a_address, a_size)
val a_isLogical = in.a.bits.opcode === TLMessages.LogicalData
val a_isArithmetic = in.a.bits.opcode === TLMessages.ArithmeticData
val a_isSupported = Mux(a_isLogical, a_canLogical, Mux(a_isArithmetic, a_canArithmetic, true.B))
// Must we do a Put?
val a_cam_any_put = cam_amo.reduce(_ || _)
val a_cam_por_put = cam_amo.scanLeft(false.B)(_||_).init
val a_cam_sel_put = (cam_amo zip a_cam_por_put) map { case (a, b) => a && !b }
val a_cam_a = PriorityMux(cam_amo, cam_a)
val a_cam_d = PriorityMux(cam_amo, cam_d)
val a_a = a_cam_a.bits.data
val a_d = a_cam_d.data
// Does the A request conflict with an inflight AMO?
val a_fifoId = edgeOut.manager.fastProperty(a_address, camFifoId _, (i:Int) => i.U)
val a_cam_busy = (cam_abusy zip cam_a.map(_.fifoId === a_fifoId)) map { case (a,b) => a&&b } reduce (_||_)
// (Where) are we allocating in the CAM?
val a_cam_any_free = cam_free.reduce(_ || _)
val a_cam_por_free = cam_free.scanLeft(false.B)(_||_).init
val a_cam_sel_free = (cam_free zip a_cam_por_free) map { case (a,b) => a && !b }
// Logical AMO
val indexes = Seq.tabulate(beatBytes*8) { i => Cat(a_a(i,i), a_d(i,i)) }
val logic_out = Cat(indexes.map(x => a_cam_a.lut(x).asUInt).reverse)
// Arithmetic AMO
val unsigned = a_cam_a.bits.param(1)
val take_max = a_cam_a.bits.param(0)
val adder = a_cam_a.bits.param(2)
val mask = a_cam_a.bits.mask
val signSel = ~(~mask | (mask >> 1))
val signbits_a = Cat(Seq.tabulate(beatBytes) { i => a_a(8*i+7,8*i+7) } .reverse)
val signbits_d = Cat(Seq.tabulate(beatBytes) { i => a_d(8*i+7,8*i+7) } .reverse)
// Move the selected sign bit into the first byte position it will extend
val signbit_a = ((signbits_a & signSel) << 1)(beatBytes-1, 0)
val signbit_d = ((signbits_d & signSel) << 1)(beatBytes-1, 0)
val signext_a = FillInterleaved(8, leftOR(signbit_a))
val signext_d = FillInterleaved(8, leftOR(signbit_d))
// NOTE: sign-extension does not change the relative ordering in EITHER unsigned or signed arithmetic
val wide_mask = FillInterleaved(8, mask)
val a_a_ext = (a_a & wide_mask) | signext_a
val a_d_ext = (a_d & wide_mask) | signext_d
val a_d_inv = Mux(adder, a_d_ext, ~a_d_ext)
val adder_out = a_a_ext + a_d_inv
val h = 8*beatBytes-1 // now sign-extended; use biggest bit
val a_bigger_uneq = unsigned === a_a_ext(h) // result if high bits are unequal
val a_bigger = Mux(a_a_ext(h) === a_d_ext(h), !adder_out(h), a_bigger_uneq)
val pick_a = take_max === a_bigger
val arith_out = Mux(adder, adder_out, Mux(pick_a, a_a, a_d))
// AMO result data
val amo_data =
if (!logical) arith_out else
if (!arithmetic) logic_out else
Mux(a_cam_a.bits.opcode(0), logic_out, arith_out)
// Potentially mutate the message from inner
val source_i = Wire(chiselTypeOf(in.a))
val a_allow = !a_cam_busy && (a_isSupported || a_cam_any_free)
in.a.ready := source_i.ready && a_allow
source_i.valid := in.a.valid && a_allow
source_i.bits := in.a.bits
when (!a_isSupported) { // minimal mux difference
source_i.bits.opcode := TLMessages.Get
source_i.bits.param := 0.U
}
// Potentially take the message from the CAM
val source_c = Wire(chiselTypeOf(in.a))
source_c.valid := a_cam_any_put
source_c.bits := edgeOut.Put(
fromSource = a_cam_a.bits.source,
toAddress = edgeIn.address(a_cam_a.bits),
lgSize = a_cam_a.bits.size,
data = amo_data,
corrupt = a_cam_a.bits.corrupt || a_cam_d.corrupt)._2
source_c.bits.user :<= a_cam_a.bits.user
source_c.bits.echo :<= a_cam_a.bits.echo
// Finishing an AMO from the CAM has highest priority
TLArbiter(TLArbiter.lowestIndexFirst)(out.a, (0.U, source_c), (edgeOut.numBeats1(in.a.bits), source_i))
// Capture the A state into the CAM
when (source_i.fire && !a_isSupported) {
(a_cam_sel_free zip cam_a) foreach { case (en, r) =>
when (en) {
r.fifoId := a_fifoId
r.bits := in.a.bits
r.lut := MuxLookup(in.a.bits.param(1, 0), 0.U(4.W))(Array(
TLAtomics.AND -> 0x8.U,
TLAtomics.OR -> 0xe.U,
TLAtomics.XOR -> 0x6.U,
TLAtomics.SWAP -> 0xc.U))
}
}
(a_cam_sel_free zip cam_s) foreach { case (en, r) =>
when (en) {
r.state := GET
}
}
}
// Advance the put state
when (source_c.fire) {
(a_cam_sel_put zip cam_s) foreach { case (en, r) =>
when (en) {
r.state := ACK
}
}
}
// We need to deal with a potential D response in the same cycle as the A request
val d_first = edgeOut.first(out.d)
val d_cam_sel_raw = cam_a.map(_.bits.source === in.d.bits.source)
val d_cam_sel_match = (d_cam_sel_raw zip cam_dmatch) map { case (a,b) => a&&b }
val d_cam_data = Mux1H(d_cam_sel_match, cam_d.map(_.data))
val d_cam_denied = Mux1H(d_cam_sel_match, cam_d.map(_.denied))
val d_cam_corrupt = Mux1H(d_cam_sel_match, cam_d.map(_.corrupt))
val d_cam_sel_bypass = if (edgeOut.manager.minLatency > 0) false.B else
out.d.bits.source === in.a.bits.source && in.a.valid && !a_isSupported
val d_cam_sel = (a_cam_sel_free zip d_cam_sel_match) map { case (a,d) => Mux(d_cam_sel_bypass, a, d) }
val d_cam_sel_any = d_cam_sel_bypass || d_cam_sel_match.reduce(_ || _)
val d_ackd = out.d.bits.opcode === TLMessages.AccessAckData
val d_ack = out.d.bits.opcode === TLMessages.AccessAck
when (out.d.fire && d_first) {
(d_cam_sel zip cam_d) foreach { case (en, r) =>
when (en && d_ackd) {
r.data := out.d.bits.data
r.denied := out.d.bits.denied
r.corrupt := out.d.bits.corrupt
}
}
(d_cam_sel zip cam_s) foreach { case (en, r) =>
when (en) {
// Note: it is important that this comes AFTER the := GET, so we can go FREE=>GET=>AMO in one cycle
r.state := Mux(d_ackd, AMO, FREE)
}
}
}
val d_drop = d_first && d_ackd && d_cam_sel_any
val d_replace = d_first && d_ack && d_cam_sel_match.reduce(_ || _)
in.d.valid := out.d.valid && !d_drop
out.d.ready := in.d.ready || d_drop
in.d.bits := out.d.bits
when (d_replace) { // minimal muxes
in.d.bits.opcode := TLMessages.AccessAckData
in.d.bits.data := d_cam_data
in.d.bits.corrupt := d_cam_corrupt || out.d.bits.denied
in.d.bits.denied := d_cam_denied || out.d.bits.denied
}
} else {
out.a.valid := in.a.valid
in.a.ready := out.a.ready
out.a.bits := in.a.bits
in.d.valid := out.d.valid
out.d.ready := in.d.ready
in.d.bits := out.d.bits
}
if (edgeOut.manager.anySupportAcquireB && edgeIn.client.anySupportProbe) {
in.b.valid := out.b.valid
out.b.ready := in.b.ready
in.b.bits := out.b.bits
out.c.valid := in.c.valid
in.c.ready := out.c.ready
out.c.bits := in.c.bits
out.e.valid := in.e.valid
in.e.ready := out.e.ready
out.e.bits := in.e.bits
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLAtomicAutomata
{
def apply(logical: Boolean = true, arithmetic: Boolean = true, concurrency: Int = 1, passthrough: Boolean = true, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode =
{
val atomics = LazyModule(new TLAtomicAutomata(logical, arithmetic, concurrency, passthrough) {
override lazy val desiredName = (Seq("TLAtomicAutomata") ++ nameSuffix).mkString("_")
})
atomics.node
}
case class CAMParams(a: TLBundleParameters, domainsNeedingHelp: Int)
class CAM_S(val params: CAMParams) extends Bundle {
val state = UInt(2.W)
}
class CAM_A(val params: CAMParams) extends Bundle {
val bits = new TLBundleA(params.a)
val fifoId = UInt(log2Up(params.domainsNeedingHelp).W)
val lut = UInt(4.W)
}
class CAM_D(val params: CAMParams) extends Bundle {
val data = UInt(params.a.dataBits.W)
val denied = Bool()
val corrupt = Bool()
}
}
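// Illustrative wiring sketch (assumption, consistent with the unit test below and with
// PeripheryBus in this document): the adapter is spliced into a TileLink path so that
// atomics the downstream manager cannot handle are expanded into Get/Put sequences.
//
//   slaveNode := TLAtomicAutomata(passthrough = true) := masterNode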
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMAtomicAutomata(txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("AtomicAutomata"))
val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff)))
// Confirm that the AtomicAutomata combines read + write errors
import TLMessages._
val test = new RequestPattern({a: TLBundleA =>
val doesA = a.opcode === ArithmeticData || a.opcode === LogicalData
val doesR = a.opcode === Get || doesA
val doesW = a.opcode === PutFullData || a.opcode === PutPartialData || doesA
(doesR && RequestPattern.overlaps(Seq(AddressSet(0x08, ~0x08)))(a)) ||
(doesW && RequestPattern.overlaps(Seq(AddressSet(0x10, ~0x10)))(a))
})
(ram.node
:= TLErrorEvaluator(test)
:= TLFragmenter(4, 256)
:= TLDelayer(0.1)
:= TLAtomicAutomata()
:= TLDelayer(0.1)
:= TLErrorEvaluator(test, testOn=true, testOff=true)
:= model.node
:= fuzz.node)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMAtomicAutomataTest(txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMAtomicAutomata(txns)).module)
io.finished := dut.io.finished
dut.io.start := io.start
}
File ClockDomain.scala:
package freechips.rocketchip.prci
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
abstract class Domain(implicit p: Parameters) extends LazyModule with HasDomainCrossing
{
def clockBundle: ClockBundle
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
childClock := clockBundle.clock
childReset := clockBundle.reset
override def provideImplicitClockToLazyChildren = true
// these are just for backwards compatibility with external devices
// that were manually wiring themselves to the domain's clock/reset input:
val clock = IO(Output(chiselTypeOf(clockBundle.clock)))
val reset = IO(Output(chiselTypeOf(clockBundle.reset)))
clock := clockBundle.clock
reset := clockBundle.reset
}
}
abstract class ClockDomain(implicit p: Parameters) extends Domain with HasClockDomainCrossing
class ClockSinkDomain(val clockSinkParams: ClockSinkParameters)(implicit p: Parameters) extends ClockDomain
{
def this(take: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSinkParameters(take = take, name = name))
val clockNode = ClockSinkNode(Seq(clockSinkParams))
def clockBundle = clockNode.in.head._1
override lazy val desiredName = (clockSinkParams.name.toSeq :+ "ClockSinkDomain").mkString
}
class ClockSourceDomain(val clockSourceParams: ClockSourceParameters)(implicit p: Parameters) extends ClockDomain
{
def this(give: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSourceParameters(give = give, name = name))
val clockNode = ClockSourceNode(Seq(clockSourceParams))
def clockBundle = clockNode.out.head._1
override lazy val desiredName = (clockSourceParams.name.toSeq :+ "ClockSourceDomain").mkString
}
abstract class ResetDomain(implicit p: Parameters) extends Domain with HasResetDomainCrossing
File PeripheryBus.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.subsystem
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.devices.tilelink.{BuiltInZeroDeviceParams, BuiltInErrorDeviceParams, HasBuiltInDeviceParams, BuiltInDevices}
import freechips.rocketchip.diplomacy.BufferParams
import freechips.rocketchip.tilelink.{
RegionReplicator, ReplicatedRegion, HasTLBusParams, HasRegionReplicatorParams, TLBusWrapper,
TLBusWrapperInstantiationLike, TLFIFOFixer, TLNode, TLXbar, TLInwardNode, TLOutwardNode,
TLBuffer, TLWidthWidget, TLAtomicAutomata, TLEdge
}
import freechips.rocketchip.util.Location
case class BusAtomics(
arithmetic: Boolean = true,
buffer: BufferParams = BufferParams.default,
widenBytes: Option[Int] = None
)
case class PeripheryBusParams(
beatBytes: Int,
blockBytes: Int,
atomics: Option[BusAtomics] = Some(BusAtomics()),
dtsFrequency: Option[BigInt] = None,
zeroDevice: Option[BuiltInZeroDeviceParams] = None,
errorDevice: Option[BuiltInErrorDeviceParams] = None,
replication: Option[ReplicatedRegion] = None)
extends HasTLBusParams
with HasBuiltInDeviceParams
with HasRegionReplicatorParams
with TLBusWrapperInstantiationLike
{
def instantiate(context: HasTileLinkLocations, loc: Location[TLBusWrapper])(implicit p: Parameters): PeripheryBus = {
val pbus = LazyModule(new PeripheryBus(this, loc.name))
pbus.suggestName(loc.name)
context.tlBusWrapperLocationMap += (loc -> pbus)
pbus
}
}
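// Illustrative sketch (assumption; the concrete widths are hypothetical): a periphery-bus
// description with atomics support enabled, as consumed by instantiate() above.
//
//   val examplePbusParams = PeripheryBusParams(
//     beatBytes = 8,
//     blockBytes = 64,
//     atomics = Some(BusAtomics(arithmetic = true)))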
class PeripheryBus(params: PeripheryBusParams, name: String)(implicit p: Parameters)
extends TLBusWrapper(params, name)
{
override lazy val desiredName = s"PeripheryBus_$name"
private val replicator = params.replication.map(r => LazyModule(new RegionReplicator(r)))
val prefixNode = replicator.map { r =>
r.prefix := addressPrefixNexusNode
addressPrefixNexusNode
}
private val fixer = LazyModule(new TLFIFOFixer(TLFIFOFixer.all))
private val node: TLNode = params.atomics.map { pa =>
val in_xbar = LazyModule(new TLXbar(nameSuffix = Some(s"${name}_in")))
val out_xbar = LazyModule(new TLXbar(nameSuffix = Some(s"${name}_out")))
val fixer_node = replicator.map(fixer.node :*= _.node).getOrElse(fixer.node)
(out_xbar.node
:*= fixer_node
:*= TLBuffer(pa.buffer)
:*= (pa.widenBytes.filter(_ > beatBytes).map { w =>
TLWidthWidget(w) :*= TLAtomicAutomata(arithmetic = pa.arithmetic, nameSuffix = Some(name))
} .getOrElse { TLAtomicAutomata(arithmetic = pa.arithmetic, nameSuffix = Some(name)) })
:*= in_xbar.node)
} .getOrElse { TLXbar() :*= fixer.node }
def inwardNode: TLInwardNode = node
def outwardNode: TLOutwardNode = node
def busView: TLEdge = fixer.node.edges.in.head
val builtInDevices: BuiltInDevices = BuiltInDevices.attach(params, outwardNode)
}
File ClockGroup.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.prci
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.resources.FixedClockResource
case class ClockGroupingNode(groupName: String)(implicit valName: ValName)
extends MixedNexusNode(ClockGroupImp, ClockImp)(
dFn = { _ => ClockSourceParameters() },
uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq) })
{
override def circuitIdentity = outputs.size == 1
}
class ClockGroup(groupName: String)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupingNode(groupName)
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in(0)
val (out, _) = node.out.unzip
require (node.in.size == 1)
require (in.member.size == out.size)
(in.member.data zip out) foreach { case (i, o) => o := i }
}
}
object ClockGroup
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroup(valName.name)).node
}
case class ClockGroupAggregateNode(groupName: String)(implicit valName: ValName)
extends NexusNode(ClockGroupImp)(
dFn = { _ => ClockGroupSourceParameters() },
uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq.flatMap(_.members))})
{
override def circuitIdentity = outputs.size == 1
}
class ClockGroupAggregator(groupName: String)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupAggregateNode(groupName)
override lazy val desiredName = s"ClockGroupAggregator_$groupName"
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in.unzip
val (out, _) = node.out.unzip
val outputs = out.flatMap(_.member.data)
require (node.in.size == 1, s"Aggregator for groupName: ${groupName} had ${node.in.size} inward edges instead of 1")
require (in.head.member.size == outputs.size)
in.head.member.data.zip(outputs).foreach { case (i, o) => o := i }
}
}
object ClockGroupAggregator
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroupAggregator(valName.name)).node
}
class SimpleClockGroupSource(numSources: Int = 1)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupSourceNode(List.fill(numSources) { ClockGroupSourceParameters() })
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
val (out, _) = node.out.unzip
out.map { out: ClockGroupBundle =>
out.member.data.foreach { o =>
o.clock := clock; o.reset := reset }
}
}
}
object SimpleClockGroupSource
{
def apply(num: Int = 1)(implicit p: Parameters, valName: ValName) = LazyModule(new SimpleClockGroupSource(num)).node
}
case class FixedClockBroadcastNode(fixedClockOpt: Option[ClockParameters])(implicit valName: ValName)
extends NexusNode(ClockImp)(
dFn = { seq => fixedClockOpt.map(_ => ClockSourceParameters(give = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSourceParameters()) },
uFn = { seq => fixedClockOpt.map(_ => ClockSinkParameters(take = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSinkParameters()) },
inputRequiresOutput = false) {
def fixedClockResources(name: String, prefix: String = "soc/"): Seq[Option[FixedClockResource]] = Seq(fixedClockOpt.map(t => new FixedClockResource(name, t.freqMHz, prefix)))
}
class FixedClockBroadcast(fixedClockOpt: Option[ClockParameters])(implicit p: Parameters) extends LazyModule
{
val node = new FixedClockBroadcastNode(fixedClockOpt) {
override def circuitIdentity = outputs.size == 1
}
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in(0)
val (out, _) = node.out.unzip
override def desiredName = s"FixedClockBroadcast_${out.size}"
require (node.in.size == 1, "FixedClockBroadcast can only broadcast a single clock")
out.foreach { _ := in }
}
}
object FixedClockBroadcast
{
def apply(fixedClockOpt: Option[ClockParameters] = None)(implicit p: Parameters, valName: ValName) = LazyModule(new FixedClockBroadcast(fixedClockOpt)).node
}
case class PRCIClockGroupNode()(implicit valName: ValName)
extends NexusNode(ClockGroupImp)(
dFn = { _ => ClockGroupSourceParameters() },
uFn = { _ => ClockGroupSinkParameters("prci", Nil) },
outputRequiresInput = false)
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this
 * module and its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File CustomBootPin.scala:
package testchipip.boot
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.devices.tilelink._
import freechips.rocketchip.regmapper._
import freechips.rocketchip.subsystem._
case class CustomBootPinParams(
customBootAddress: BigInt = 0x80000000L, // Default is DRAM_BASE
masterWhere: TLBusWrapperLocation = CBUS // This needs to write to clint and bootaddrreg, which are on CBUS/PBUS
)
case object CustomBootPinKey extends Field[Option[CustomBootPinParams]](None)
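// Illustrative config sketch (assumption; `WithCustomBootPin` is a hypothetical fragment
// name): enabling the pin by setting the key defined above in a CDE Config.
//
//   class WithCustomBootPin extends Config((site, here, up) => {
//     case CustomBootPinKey => Some(CustomBootPinParams(customBootAddress = 0x80000000L))
//   })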
trait CanHavePeripheryCustomBootPin { this: BaseSubsystem =>
val custom_boot_pin = p(CustomBootPinKey).map { params =>
require(p(BootAddrRegKey).isDefined, "CustomBootPin relies on existence of BootAddrReg")
val tlbus = locateTLBusWrapper(params.masterWhere)
val clientParams = TLMasterPortParameters.v1(
clients = Seq(TLMasterParameters.v1(
name = "custom-boot",
sourceId = IdRange(0, 1),
)),
minLatency = 1
)
val inner_io = tlbus {
val node = TLClientNode(Seq(clientParams))
tlbus.coupleFrom(s"port_named_custom_boot_pin") ({ _ := node })
InModuleBody {
val custom_boot = IO(Input(Bool())).suggestName("custom_boot")
val (tl, edge) = node.out(0)
val inactive :: waiting_bootaddr_reg_a :: waiting_bootaddr_reg_d :: waiting_msip_a :: waiting_msip_d :: dead :: Nil = Enum(6)
val state = RegInit(inactive)
tl.a.valid := false.B
tl.a.bits := DontCare
tl.d.ready := true.B
switch (state) {
is (inactive) { when (custom_boot) { state := waiting_bootaddr_reg_a } }
is (waiting_bootaddr_reg_a) {
tl.a.valid := true.B
tl.a.bits := edge.Put(
toAddress = p(BootAddrRegKey).get.bootRegAddress.U,
fromSource = 0.U,
lgSize = 2.U,
data = params.customBootAddress.U
)._2
when (tl.a.fire) { state := waiting_bootaddr_reg_d }
}
is (waiting_bootaddr_reg_d) { when (tl.d.fire) { state := waiting_msip_a } }
is (waiting_msip_a) {
tl.a.valid := true.B
tl.a.bits := edge.Put(
toAddress = (p(CLINTKey).get.baseAddress + CLINTConsts.msipOffset(0)).U, // msip for hart0
fromSource = 0.U,
lgSize = log2Ceil(CLINTConsts.msipBytes).U,
data = 1.U
)._2
when (tl.a.fire) { state := waiting_msip_d }
}
is (waiting_msip_d) { when (tl.d.fire) { state := dead } }
is (dead) { when (!custom_boot) { state := inactive } }
}
custom_boot
}
}
val outer_io = InModuleBody {
val custom_boot = IO(Input(Bool())).suggestName("custom_boot")
inner_io := custom_boot
custom_boot
}
outer_io
}
}
File LazyScope.scala:
package org.chipsalliance.diplomacy.lazymodule
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.ValName
/** Allows dynamic creation of [[Module]] hierarchy and "shoving" logic into a [[LazyModule]]. */
trait LazyScope {
this: LazyModule =>
override def toString: String = s"LazyScope named $name"
/** Evaluate `body` in the current [[LazyModule.scope]] */
def apply[T](body: => T): T = {
// Preserve the previous value of the [[LazyModule.scope]], because when calling [[apply]] function,
// [[LazyModule.scope]] will be altered.
val saved = LazyModule.scope
// [[LazyModule.scope]] stack push.
LazyModule.scope = Some(this)
// Evaluate [[body]] in the current `scope`, saving the result to [[out]].
val out = body
// Check that the `scope` after evaluating `body` is the same as when we started.
require(LazyModule.scope.isDefined, s"LazyScope $name tried to exit, but scope was empty!")
require(
LazyModule.scope.get eq this,
s"LazyScope $name exited before LazyModule ${LazyModule.scope.get.name} was closed"
)
// [[LazyModule.scope]] stack pop.
LazyModule.scope = saved
out
}
}
/** Used to automatically create a level of module hierarchy (a [[SimpleLazyModule]]) within which [[LazyModule]]s can
* be instantiated and connected.
*
* It will instantiate a [[SimpleLazyModule]] to manage evaluation of `body` and evaluate `body` code snippets in this
* scope.
*/
object LazyScope {
/** Create a [[LazyScope]] with an implicit instance name.
*
* @param body
* code executed within the generated [[SimpleLazyModule]].
* @param valName
* instance name of generated [[SimpleLazyModule]].
* @param p
* [[Parameters]] propagated to [[SimpleLazyModule]].
*/
def apply[T](
body: => T
)(
implicit valName: ValName,
p: Parameters
): T = {
apply(valName.value, "SimpleLazyModule", None)(body)(p)
}
/** Create a [[LazyScope]] with an explicitly defined instance name.
*
* @param name
* instance name of generated [[SimpleLazyModule]].
* @param body
* code executed within the generated `SimpleLazyModule`
* @param p
* [[Parameters]] propagated to [[SimpleLazyModule]].
*/
def apply[T](
name: String
)(body: => T
)(
implicit p: Parameters
): T = {
apply(name, "SimpleLazyModule", None)(body)(p)
}
/** Create a [[LazyScope]] with an explicit instance and class name, and control inlining.
*
* @param name
* instance name of generated [[SimpleLazyModule]].
* @param desiredModuleName
* class name of generated [[SimpleLazyModule]].
* @param overrideInlining
* tell FIRRTL that this [[SimpleLazyModule]]'s module should be inlined.
* @param body
* code executed within the generated `SimpleLazyModule`
* @param p
* [[Parameters]] propagated to [[SimpleLazyModule]].
*/
def apply[T](
name: String,
desiredModuleName: String,
overrideInlining: Option[Boolean] = None
)(body: => T
)(
implicit p: Parameters
): T = {
val scope = LazyModule(new SimpleLazyModule with LazyScope {
override lazy val desiredName = desiredModuleName
override def shouldBeInlined = overrideInlining.getOrElse(super.shouldBeInlined)
}).suggestName(name)
scope {
body
}
}
/** Create a [[LazyScope]] to temporarily group children for some reason, but tell Firrtl to inline it.
*
* For example, we might want to control a set of children's clocks but then not keep the parent wrapper.
*
* @param body
* code executed within the generated `SimpleLazyModule`
* @param p
* [[Parameters]] propagated to [[SimpleLazyModule]].
*/
def inline[T](
body: => T
)(
implicit p: Parameters
): T = {
apply("noname", "ShouldBeInlined", Some(false))(body)(p)
}
}
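// Illustrative usage sketch (assumption; the name and body are hypothetical): grouping a
// helper LazyModule under a named level of hierarchy, much like the bus couplers that
// appear in the generated module below.
//
//   val bufferNode = LazyScope("coupler_to_example") {
//     LazyModule(new TLBuffer()).node
//   }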
| module PeripheryBus_cbus( // @[ClockDomain.scala:14:9]
input auto_coupler_to_prci_ctrl_fixer_anon_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_prci_ctrl_fixer_anon_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_prci_ctrl_fixer_anon_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_prci_ctrl_fixer_anon_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_prci_ctrl_fixer_anon_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [6:0] auto_coupler_to_prci_ctrl_fixer_anon_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [20:0] auto_coupler_to_prci_ctrl_fixer_anon_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_coupler_to_prci_ctrl_fixer_anon_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_coupler_to_prci_ctrl_fixer_anon_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_prci_ctrl_fixer_anon_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_prci_ctrl_fixer_anon_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_prci_ctrl_fixer_anon_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_coupler_to_prci_ctrl_fixer_anon_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_coupler_to_prci_ctrl_fixer_anon_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [6:0] auto_coupler_to_prci_ctrl_fixer_anon_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_coupler_to_prci_ctrl_fixer_anon_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_bootrom_fragmenter_anon_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_bootrom_fragmenter_anon_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_bootrom_fragmenter_anon_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_bootrom_fragmenter_anon_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_coupler_to_bootrom_fragmenter_anon_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [10:0] auto_coupler_to_bootrom_fragmenter_anon_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [16:0] auto_coupler_to_bootrom_fragmenter_anon_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_coupler_to_bootrom_fragmenter_anon_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_bootrom_fragmenter_anon_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_bootrom_fragmenter_anon_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_bootrom_fragmenter_anon_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_coupler_to_bootrom_fragmenter_anon_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [10:0] auto_coupler_to_bootrom_fragmenter_anon_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_coupler_to_bootrom_fragmenter_anon_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_debug_fragmenter_anon_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_debug_fragmenter_anon_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_debug_fragmenter_anon_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_debug_fragmenter_anon_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_coupler_to_debug_fragmenter_anon_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [10:0] auto_coupler_to_debug_fragmenter_anon_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [11:0] auto_coupler_to_debug_fragmenter_anon_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_coupler_to_debug_fragmenter_anon_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_coupler_to_debug_fragmenter_anon_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_debug_fragmenter_anon_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_debug_fragmenter_anon_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_debug_fragmenter_anon_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_coupler_to_debug_fragmenter_anon_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_coupler_to_debug_fragmenter_anon_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [10:0] auto_coupler_to_debug_fragmenter_anon_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_coupler_to_debug_fragmenter_anon_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_plic_fragmenter_anon_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_plic_fragmenter_anon_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_plic_fragmenter_anon_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_plic_fragmenter_anon_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_coupler_to_plic_fragmenter_anon_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [10:0] auto_coupler_to_plic_fragmenter_anon_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [27:0] auto_coupler_to_plic_fragmenter_anon_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_coupler_to_plic_fragmenter_anon_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_coupler_to_plic_fragmenter_anon_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_plic_fragmenter_anon_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_plic_fragmenter_anon_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_plic_fragmenter_anon_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_coupler_to_plic_fragmenter_anon_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_coupler_to_plic_fragmenter_anon_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [10:0] auto_coupler_to_plic_fragmenter_anon_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_coupler_to_plic_fragmenter_anon_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_clint_fragmenter_anon_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_clint_fragmenter_anon_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_clint_fragmenter_anon_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_clint_fragmenter_anon_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_coupler_to_clint_fragmenter_anon_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [10:0] auto_coupler_to_clint_fragmenter_anon_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [25:0] auto_coupler_to_clint_fragmenter_anon_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_coupler_to_clint_fragmenter_anon_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_coupler_to_clint_fragmenter_anon_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_clint_fragmenter_anon_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_clint_fragmenter_anon_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_clint_fragmenter_anon_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_coupler_to_clint_fragmenter_anon_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_coupler_to_clint_fragmenter_anon_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [10:0] auto_coupler_to_clint_fragmenter_anon_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_coupler_to_clint_fragmenter_anon_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_bus_named_pbus_bus_xing_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_bus_named_pbus_bus_xing_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_bus_named_pbus_bus_xing_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_bus_named_pbus_bus_xing_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_bus_named_pbus_bus_xing_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [6:0] auto_coupler_to_bus_named_pbus_bus_xing_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [28:0] auto_coupler_to_bus_named_pbus_bus_xing_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_coupler_to_bus_named_pbus_bus_xing_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_coupler_to_bus_named_pbus_bus_xing_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_bus_named_pbus_bus_xing_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_bus_named_pbus_bus_xing_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_bus_named_pbus_bus_xing_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_coupler_to_bus_named_pbus_bus_xing_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_coupler_to_bus_named_pbus_bus_xing_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_coupler_to_bus_named_pbus_bus_xing_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [6:0] auto_coupler_to_bus_named_pbus_bus_xing_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_bus_named_pbus_bus_xing_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_bus_named_pbus_bus_xing_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_coupler_to_bus_named_pbus_bus_xing_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_bus_named_pbus_bus_xing_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_l2_ctrl_buffer_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_l2_ctrl_buffer_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_l2_ctrl_buffer_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_l2_ctrl_buffer_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_coupler_to_l2_ctrl_buffer_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [10:0] auto_coupler_to_l2_ctrl_buffer_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [25:0] auto_coupler_to_l2_ctrl_buffer_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_coupler_to_l2_ctrl_buffer_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_coupler_to_l2_ctrl_buffer_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_l2_ctrl_buffer_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_l2_ctrl_buffer_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_l2_ctrl_buffer_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_coupler_to_l2_ctrl_buffer_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_coupler_to_l2_ctrl_buffer_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [10:0] auto_coupler_to_l2_ctrl_buffer_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_coupler_to_l2_ctrl_buffer_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_5_clock, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_5_reset, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_4_clock, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_4_reset, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_3_clock, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_3_reset, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_2_clock, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_2_reset, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_1_clock, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_1_reset, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_0_clock, // @[LazyModuleImp.scala:107:25]
output auto_fixedClockNode_anon_out_0_reset, // @[LazyModuleImp.scala:107:25]
input auto_cbus_clock_groups_in_member_cbus_0_clock, // @[LazyModuleImp.scala:107:25]
input auto_cbus_clock_groups_in_member_cbus_0_reset, // @[LazyModuleImp.scala:107:25]
output auto_bus_xing_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_bus_xing_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_bus_xing_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_bus_xing_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_bus_xing_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_bus_xing_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [28:0] auto_bus_xing_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_bus_xing_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_bus_xing_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_bus_xing_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_bus_xing_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_bus_xing_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_bus_xing_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_bus_xing_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_bus_xing_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_bus_xing_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_bus_xing_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_bus_xing_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_bus_xing_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_bus_xing_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input custom_boot // @[CustomBootPin.scala:36:29]
);
wire _coupler_to_prci_ctrl_auto_tl_in_a_ready; // @[LazyScope.scala:98:27]
wire _coupler_to_prci_ctrl_auto_tl_in_d_valid; // @[LazyScope.scala:98:27]
wire [2:0] _coupler_to_prci_ctrl_auto_tl_in_d_bits_opcode; // @[LazyScope.scala:98:27]
wire [1:0] _coupler_to_prci_ctrl_auto_tl_in_d_bits_param; // @[LazyScope.scala:98:27]
wire [2:0] _coupler_to_prci_ctrl_auto_tl_in_d_bits_size; // @[LazyScope.scala:98:27]
wire [6:0] _coupler_to_prci_ctrl_auto_tl_in_d_bits_source; // @[LazyScope.scala:98:27]
wire _coupler_to_prci_ctrl_auto_tl_in_d_bits_sink; // @[LazyScope.scala:98:27]
wire _coupler_to_prci_ctrl_auto_tl_in_d_bits_denied; // @[LazyScope.scala:98:27]
wire [63:0] _coupler_to_prci_ctrl_auto_tl_in_d_bits_data; // @[LazyScope.scala:98:27]
wire _coupler_to_prci_ctrl_auto_tl_in_d_bits_corrupt; // @[LazyScope.scala:98:27]
wire _coupler_to_bootrom_auto_tl_in_a_ready; // @[LazyScope.scala:98:27]
wire _coupler_to_bootrom_auto_tl_in_d_valid; // @[LazyScope.scala:98:27]
wire [2:0] _coupler_to_bootrom_auto_tl_in_d_bits_size; // @[LazyScope.scala:98:27]
wire [6:0] _coupler_to_bootrom_auto_tl_in_d_bits_source; // @[LazyScope.scala:98:27]
wire [63:0] _coupler_to_bootrom_auto_tl_in_d_bits_data; // @[LazyScope.scala:98:27]
wire _coupler_to_debug_auto_tl_in_a_ready; // @[LazyScope.scala:98:27]
wire _coupler_to_debug_auto_tl_in_d_valid; // @[LazyScope.scala:98:27]
wire [2:0] _coupler_to_debug_auto_tl_in_d_bits_opcode; // @[LazyScope.scala:98:27]
wire [2:0] _coupler_to_debug_auto_tl_in_d_bits_size; // @[LazyScope.scala:98:27]
wire [6:0] _coupler_to_debug_auto_tl_in_d_bits_source; // @[LazyScope.scala:98:27]
wire [63:0] _coupler_to_debug_auto_tl_in_d_bits_data; // @[LazyScope.scala:98:27]
wire _coupler_to_plic_auto_tl_in_a_ready; // @[LazyScope.scala:98:27]
wire _coupler_to_plic_auto_tl_in_d_valid; // @[LazyScope.scala:98:27]
wire [2:0] _coupler_to_plic_auto_tl_in_d_bits_opcode; // @[LazyScope.scala:98:27]
wire [2:0] _coupler_to_plic_auto_tl_in_d_bits_size; // @[LazyScope.scala:98:27]
wire [6:0] _coupler_to_plic_auto_tl_in_d_bits_source; // @[LazyScope.scala:98:27]
wire [63:0] _coupler_to_plic_auto_tl_in_d_bits_data; // @[LazyScope.scala:98:27]
wire _coupler_to_clint_auto_tl_in_a_ready; // @[LazyScope.scala:98:27]
wire _coupler_to_clint_auto_tl_in_d_valid; // @[LazyScope.scala:98:27]
wire [2:0] _coupler_to_clint_auto_tl_in_d_bits_opcode; // @[LazyScope.scala:98:27]
wire [2:0] _coupler_to_clint_auto_tl_in_d_bits_size; // @[LazyScope.scala:98:27]
wire [6:0] _coupler_to_clint_auto_tl_in_d_bits_source; // @[LazyScope.scala:98:27]
wire [63:0] _coupler_to_clint_auto_tl_in_d_bits_data; // @[LazyScope.scala:98:27]
wire _coupler_to_l2_ctrl_auto_tl_in_a_ready; // @[LazyScope.scala:98:27]
wire _coupler_to_l2_ctrl_auto_tl_in_d_valid; // @[LazyScope.scala:98:27]
wire [2:0] _coupler_to_l2_ctrl_auto_tl_in_d_bits_opcode; // @[LazyScope.scala:98:27]
wire [1:0] _coupler_to_l2_ctrl_auto_tl_in_d_bits_param; // @[LazyScope.scala:98:27]
wire [2:0] _coupler_to_l2_ctrl_auto_tl_in_d_bits_size; // @[LazyScope.scala:98:27]
wire [6:0] _coupler_to_l2_ctrl_auto_tl_in_d_bits_source; // @[LazyScope.scala:98:27]
wire _coupler_to_l2_ctrl_auto_tl_in_d_bits_sink; // @[LazyScope.scala:98:27]
wire _coupler_to_l2_ctrl_auto_tl_in_d_bits_denied; // @[LazyScope.scala:98:27]
wire [63:0] _coupler_to_l2_ctrl_auto_tl_in_d_bits_data; // @[LazyScope.scala:98:27]
wire _coupler_to_l2_ctrl_auto_tl_in_d_bits_corrupt; // @[LazyScope.scala:98:27]
wire _wrapped_error_device_auto_buffer_in_a_ready; // @[LazyScope.scala:98:27]
wire _wrapped_error_device_auto_buffer_in_d_valid; // @[LazyScope.scala:98:27]
wire [2:0] _wrapped_error_device_auto_buffer_in_d_bits_opcode; // @[LazyScope.scala:98:27]
wire [1:0] _wrapped_error_device_auto_buffer_in_d_bits_param; // @[LazyScope.scala:98:27]
wire [3:0] _wrapped_error_device_auto_buffer_in_d_bits_size; // @[LazyScope.scala:98:27]
wire [6:0] _wrapped_error_device_auto_buffer_in_d_bits_source; // @[LazyScope.scala:98:27]
wire _wrapped_error_device_auto_buffer_in_d_bits_sink; // @[LazyScope.scala:98:27]
wire _wrapped_error_device_auto_buffer_in_d_bits_denied; // @[LazyScope.scala:98:27]
wire [63:0] _wrapped_error_device_auto_buffer_in_d_bits_data; // @[LazyScope.scala:98:27]
wire _wrapped_error_device_auto_buffer_in_d_bits_corrupt; // @[LazyScope.scala:98:27]
wire _atomics_auto_in_a_ready; // @[AtomicAutomata.scala:289:29]
wire _atomics_auto_in_d_valid; // @[AtomicAutomata.scala:289:29]
wire [2:0] _atomics_auto_in_d_bits_opcode; // @[AtomicAutomata.scala:289:29]
wire [1:0] _atomics_auto_in_d_bits_param; // @[AtomicAutomata.scala:289:29]
wire [3:0] _atomics_auto_in_d_bits_size; // @[AtomicAutomata.scala:289:29]
wire [6:0] _atomics_auto_in_d_bits_source; // @[AtomicAutomata.scala:289:29]
wire _atomics_auto_in_d_bits_sink; // @[AtomicAutomata.scala:289:29]
wire _atomics_auto_in_d_bits_denied; // @[AtomicAutomata.scala:289:29]
wire [63:0] _atomics_auto_in_d_bits_data; // @[AtomicAutomata.scala:289:29]
wire _atomics_auto_in_d_bits_corrupt; // @[AtomicAutomata.scala:289:29]
wire _atomics_auto_out_a_valid; // @[AtomicAutomata.scala:289:29]
wire [2:0] _atomics_auto_out_a_bits_opcode; // @[AtomicAutomata.scala:289:29]
wire [2:0] _atomics_auto_out_a_bits_param; // @[AtomicAutomata.scala:289:29]
wire [3:0] _atomics_auto_out_a_bits_size; // @[AtomicAutomata.scala:289:29]
wire [6:0] _atomics_auto_out_a_bits_source; // @[AtomicAutomata.scala:289:29]
wire [28:0] _atomics_auto_out_a_bits_address; // @[AtomicAutomata.scala:289:29]
wire [7:0] _atomics_auto_out_a_bits_mask; // @[AtomicAutomata.scala:289:29]
wire [63:0] _atomics_auto_out_a_bits_data; // @[AtomicAutomata.scala:289:29]
wire _atomics_auto_out_a_bits_corrupt; // @[AtomicAutomata.scala:289:29]
wire _atomics_auto_out_d_ready; // @[AtomicAutomata.scala:289:29]
wire _buffer_auto_in_a_ready; // @[Buffer.scala:75:28]
wire _buffer_auto_in_d_valid; // @[Buffer.scala:75:28]
wire [2:0] _buffer_auto_in_d_bits_opcode; // @[Buffer.scala:75:28]
wire [1:0] _buffer_auto_in_d_bits_param; // @[Buffer.scala:75:28]
wire [3:0] _buffer_auto_in_d_bits_size; // @[Buffer.scala:75:28]
wire [6:0] _buffer_auto_in_d_bits_source; // @[Buffer.scala:75:28]
wire _buffer_auto_in_d_bits_sink; // @[Buffer.scala:75:28]
wire _buffer_auto_in_d_bits_denied; // @[Buffer.scala:75:28]
wire [63:0] _buffer_auto_in_d_bits_data; // @[Buffer.scala:75:28]
wire _buffer_auto_in_d_bits_corrupt; // @[Buffer.scala:75:28]
wire _buffer_auto_out_a_valid; // @[Buffer.scala:75:28]
wire [2:0] _buffer_auto_out_a_bits_opcode; // @[Buffer.scala:75:28]
wire [2:0] _buffer_auto_out_a_bits_param; // @[Buffer.scala:75:28]
wire [3:0] _buffer_auto_out_a_bits_size; // @[Buffer.scala:75:28]
wire [6:0] _buffer_auto_out_a_bits_source; // @[Buffer.scala:75:28]
wire [28:0] _buffer_auto_out_a_bits_address; // @[Buffer.scala:75:28]
wire [7:0] _buffer_auto_out_a_bits_mask; // @[Buffer.scala:75:28]
wire [63:0] _buffer_auto_out_a_bits_data; // @[Buffer.scala:75:28]
wire _buffer_auto_out_a_bits_corrupt; // @[Buffer.scala:75:28]
wire _buffer_auto_out_d_ready; // @[Buffer.scala:75:28]
wire _out_xbar_auto_anon_in_a_ready; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_in_d_valid; // @[PeripheryBus.scala:57:30]
wire [2:0] _out_xbar_auto_anon_in_d_bits_opcode; // @[PeripheryBus.scala:57:30]
wire [1:0] _out_xbar_auto_anon_in_d_bits_param; // @[PeripheryBus.scala:57:30]
wire [3:0] _out_xbar_auto_anon_in_d_bits_size; // @[PeripheryBus.scala:57:30]
wire [6:0] _out_xbar_auto_anon_in_d_bits_source; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_in_d_bits_sink; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_in_d_bits_denied; // @[PeripheryBus.scala:57:30]
wire [63:0] _out_xbar_auto_anon_in_d_bits_data; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_in_d_bits_corrupt; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_out_7_a_valid; // @[PeripheryBus.scala:57:30]
wire [2:0] _out_xbar_auto_anon_out_7_a_bits_opcode; // @[PeripheryBus.scala:57:30]
wire [2:0] _out_xbar_auto_anon_out_7_a_bits_param; // @[PeripheryBus.scala:57:30]
wire [2:0] _out_xbar_auto_anon_out_7_a_bits_size; // @[PeripheryBus.scala:57:30]
wire [6:0] _out_xbar_auto_anon_out_7_a_bits_source; // @[PeripheryBus.scala:57:30]
wire [20:0] _out_xbar_auto_anon_out_7_a_bits_address; // @[PeripheryBus.scala:57:30]
wire [7:0] _out_xbar_auto_anon_out_7_a_bits_mask; // @[PeripheryBus.scala:57:30]
wire [63:0] _out_xbar_auto_anon_out_7_a_bits_data; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_out_7_a_bits_corrupt; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_out_7_d_ready; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_out_6_a_valid; // @[PeripheryBus.scala:57:30]
wire [2:0] _out_xbar_auto_anon_out_6_a_bits_opcode; // @[PeripheryBus.scala:57:30]
wire [2:0] _out_xbar_auto_anon_out_6_a_bits_param; // @[PeripheryBus.scala:57:30]
wire [2:0] _out_xbar_auto_anon_out_6_a_bits_size; // @[PeripheryBus.scala:57:30]
wire [6:0] _out_xbar_auto_anon_out_6_a_bits_source; // @[PeripheryBus.scala:57:30]
wire [16:0] _out_xbar_auto_anon_out_6_a_bits_address; // @[PeripheryBus.scala:57:30]
wire [7:0] _out_xbar_auto_anon_out_6_a_bits_mask; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_out_6_a_bits_corrupt; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_out_6_d_ready; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_out_5_a_valid; // @[PeripheryBus.scala:57:30]
wire [2:0] _out_xbar_auto_anon_out_5_a_bits_opcode; // @[PeripheryBus.scala:57:30]
wire [2:0] _out_xbar_auto_anon_out_5_a_bits_param; // @[PeripheryBus.scala:57:30]
wire [2:0] _out_xbar_auto_anon_out_5_a_bits_size; // @[PeripheryBus.scala:57:30]
wire [6:0] _out_xbar_auto_anon_out_5_a_bits_source; // @[PeripheryBus.scala:57:30]
wire [11:0] _out_xbar_auto_anon_out_5_a_bits_address; // @[PeripheryBus.scala:57:30]
wire [7:0] _out_xbar_auto_anon_out_5_a_bits_mask; // @[PeripheryBus.scala:57:30]
wire [63:0] _out_xbar_auto_anon_out_5_a_bits_data; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_out_5_a_bits_corrupt; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_out_5_d_ready; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_out_4_a_valid; // @[PeripheryBus.scala:57:30]
wire [2:0] _out_xbar_auto_anon_out_4_a_bits_opcode; // @[PeripheryBus.scala:57:30]
wire [2:0] _out_xbar_auto_anon_out_4_a_bits_param; // @[PeripheryBus.scala:57:30]
wire [2:0] _out_xbar_auto_anon_out_4_a_bits_size; // @[PeripheryBus.scala:57:30]
wire [6:0] _out_xbar_auto_anon_out_4_a_bits_source; // @[PeripheryBus.scala:57:30]
wire [27:0] _out_xbar_auto_anon_out_4_a_bits_address; // @[PeripheryBus.scala:57:30]
wire [7:0] _out_xbar_auto_anon_out_4_a_bits_mask; // @[PeripheryBus.scala:57:30]
wire [63:0] _out_xbar_auto_anon_out_4_a_bits_data; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_out_4_a_bits_corrupt; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_out_4_d_ready; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_out_3_a_valid; // @[PeripheryBus.scala:57:30]
wire [2:0] _out_xbar_auto_anon_out_3_a_bits_opcode; // @[PeripheryBus.scala:57:30]
wire [2:0] _out_xbar_auto_anon_out_3_a_bits_param; // @[PeripheryBus.scala:57:30]
wire [2:0] _out_xbar_auto_anon_out_3_a_bits_size; // @[PeripheryBus.scala:57:30]
wire [6:0] _out_xbar_auto_anon_out_3_a_bits_source; // @[PeripheryBus.scala:57:30]
wire [25:0] _out_xbar_auto_anon_out_3_a_bits_address; // @[PeripheryBus.scala:57:30]
wire [7:0] _out_xbar_auto_anon_out_3_a_bits_mask; // @[PeripheryBus.scala:57:30]
wire [63:0] _out_xbar_auto_anon_out_3_a_bits_data; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_out_3_a_bits_corrupt; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_out_3_d_ready; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_out_1_a_valid; // @[PeripheryBus.scala:57:30]
wire [2:0] _out_xbar_auto_anon_out_1_a_bits_opcode; // @[PeripheryBus.scala:57:30]
wire [2:0] _out_xbar_auto_anon_out_1_a_bits_param; // @[PeripheryBus.scala:57:30]
wire [2:0] _out_xbar_auto_anon_out_1_a_bits_size; // @[PeripheryBus.scala:57:30]
wire [6:0] _out_xbar_auto_anon_out_1_a_bits_source; // @[PeripheryBus.scala:57:30]
wire [25:0] _out_xbar_auto_anon_out_1_a_bits_address; // @[PeripheryBus.scala:57:30]
wire [7:0] _out_xbar_auto_anon_out_1_a_bits_mask; // @[PeripheryBus.scala:57:30]
wire [63:0] _out_xbar_auto_anon_out_1_a_bits_data; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_out_1_a_bits_corrupt; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_out_1_d_ready; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_out_0_a_valid; // @[PeripheryBus.scala:57:30]
wire [2:0] _out_xbar_auto_anon_out_0_a_bits_opcode; // @[PeripheryBus.scala:57:30]
wire [2:0] _out_xbar_auto_anon_out_0_a_bits_param; // @[PeripheryBus.scala:57:30]
wire [3:0] _out_xbar_auto_anon_out_0_a_bits_size; // @[PeripheryBus.scala:57:30]
wire [6:0] _out_xbar_auto_anon_out_0_a_bits_source; // @[PeripheryBus.scala:57:30]
wire [13:0] _out_xbar_auto_anon_out_0_a_bits_address; // @[PeripheryBus.scala:57:30]
wire [7:0] _out_xbar_auto_anon_out_0_a_bits_mask; // @[PeripheryBus.scala:57:30]
wire [63:0] _out_xbar_auto_anon_out_0_a_bits_data; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_out_0_a_bits_corrupt; // @[PeripheryBus.scala:57:30]
wire _out_xbar_auto_anon_out_0_d_ready; // @[PeripheryBus.scala:57:30]
wire _in_xbar_auto_anon_in_1_a_ready; // @[PeripheryBus.scala:56:29]
wire _in_xbar_auto_anon_in_1_d_valid; // @[PeripheryBus.scala:56:29]
wire _in_xbar_auto_anon_out_a_valid; // @[PeripheryBus.scala:56:29]
wire [2:0] _in_xbar_auto_anon_out_a_bits_opcode; // @[PeripheryBus.scala:56:29]
wire [2:0] _in_xbar_auto_anon_out_a_bits_param; // @[PeripheryBus.scala:56:29]
wire [3:0] _in_xbar_auto_anon_out_a_bits_size; // @[PeripheryBus.scala:56:29]
wire [6:0] _in_xbar_auto_anon_out_a_bits_source; // @[PeripheryBus.scala:56:29]
wire [28:0] _in_xbar_auto_anon_out_a_bits_address; // @[PeripheryBus.scala:56:29]
wire [7:0] _in_xbar_auto_anon_out_a_bits_mask; // @[PeripheryBus.scala:56:29]
wire [63:0] _in_xbar_auto_anon_out_a_bits_data; // @[PeripheryBus.scala:56:29]
wire _in_xbar_auto_anon_out_a_bits_corrupt; // @[PeripheryBus.scala:56:29]
wire _in_xbar_auto_anon_out_d_ready; // @[PeripheryBus.scala:56:29]
wire _fixedClockNode_auto_anon_out_0_clock; // @[ClockGroup.scala:115:114]
wire _fixedClockNode_auto_anon_out_0_reset; // @[ClockGroup.scala:115:114]
reg [2:0] state; // @[CustomBootPin.scala:39:28]
wire _GEN = state == 3'h1; // @[CustomBootPin.scala:39:28, :43:24]
wire nodeOut_a_valid = (|state) & (_GEN | state != 3'h2 & state == 3'h3); // @[CustomBootPin.scala:39:28, :40:20, :43:24, :46:24]
wire _GEN_0 = _in_xbar_auto_anon_in_1_a_ready & nodeOut_a_valid; // @[Decoupled.scala:51:35]
wire [2:0] _GEN_1 = state == 3'h5 & ~custom_boot ? 3'h0 : state; // @[CustomBootPin.scala:39:28, :43:24, :67:{29,43,51}]
wire [7:0][2:0] _GEN_2 = {{_GEN_1}, {_GEN_1}, {_GEN_1}, {_in_xbar_auto_anon_in_1_d_valid ? 3'h5 : state}, {_GEN_0 ? 3'h4 : state}, {_in_xbar_auto_anon_in_1_d_valid ? 3'h3 : state}, {_GEN_0 ? 3'h2 : state}, {custom_boot ? 3'h1 : state}}; // @[Decoupled.scala:51:35]
always @(posedge _fixedClockNode_auto_anon_out_0_clock) begin // @[ClockGroup.scala:115:114]
if (_fixedClockNode_auto_anon_out_0_reset) // @[ClockGroup.scala:115:114]
state <= 3'h0; // @[CustomBootPin.scala:39:28]
else // @[ClockGroup.scala:115:114]
state <= _GEN_2[state]; // @[CustomBootPin.scala:39:28, :43:24, :44:46, :53:30, :55:58, :64:30, :66:50]
end // always @(posedge) |
Generate the Verilog code corresponding to the following Chisel files.
File tage.scala:
package boom.v4.ifu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import boom.v4.common._
import boom.v4.util.{BoomCoreStringPrefix, MaskLower, WrapInc}
import scala.math.min
class TageResp extends Bundle {
val ctr = UInt(3.W)
val u = UInt(2.W)
}
class TageTable(val nRows: Int, val tagSz: Int, val histLength: Int, val uBitPeriod: Int, val singlePorted: Boolean)
(implicit p: Parameters) extends BoomModule()(p)
with HasBoomFrontendParameters
{
require(histLength <= globalHistoryLength)
val nWrBypassEntries = 2
val io = IO( new Bundle {
val f1_req_valid = Input(Bool())
val f1_req_pc = Input(UInt(vaddrBitsExtended.W))
val f1_req_ghist = Input(UInt(globalHistoryLength.W))
val f2_resp = Output(Vec(bankWidth, Valid(new TageResp)))
val update_mask = Input(Vec(bankWidth, Bool()))
val update_taken = Input(Vec(bankWidth, Bool()))
val update_alloc = Input(Vec(bankWidth, Bool()))
val update_old_ctr = Input(Vec(bankWidth, UInt(3.W)))
val update_pc = Input(UInt())
val update_hist = Input(UInt())
val update_u_mask = Input(Vec(bankWidth, Bool()))
val update_u = Input(Vec(bankWidth, UInt(2.W)))
})
def compute_folded_hist(hist: UInt, l: Int) = {
val nChunks = (histLength + l - 1) / l
val hist_chunks = (0 until nChunks) map {i =>
hist(min((i+1)*l, histLength)-1, i*l)
}
hist_chunks.reduce(_^_)
}
def compute_tag_and_hash(unhashed_idx: UInt, hist: UInt) = {
val idx_history = compute_folded_hist(hist, log2Ceil(nRows))
val idx = (unhashed_idx ^ idx_history)(log2Ceil(nRows)-1,0)
val tag_history = compute_folded_hist(hist, tagSz)
val tag = ((unhashed_idx >> log2Ceil(nRows)) ^ tag_history)(tagSz-1,0)
(idx, tag)
}
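// Illustrative only (not part of the original file): a plain-Scala model of the
// folded-history XOR above, operating on a BigInt instead of a Chisel UInt. The
// name foldHistModel is invented for this sketch; the chunking mirrors compute_folded_hist.
def foldHistModel(hist: BigInt, l: Int): BigInt = {
  val nChunks = (histLength + l - 1) / l
  val mask = (BigInt(1) << l) - 1
  (0 until nChunks).map(i => (hist >> (i * l)) & mask).reduce(_ ^ _)
}
// e.g. with histLength = 16 and l = 7, a history of 0xACF1 folds to
// 0x71 ^ 0x59 ^ 0x02 = 0x2A, the 7-bit value a 128-row table would be indexed with.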
def inc_ctr(ctr: UInt, taken: Bool): UInt = {
Mux(!taken, Mux(ctr === 0.U, 0.U, ctr - 1.U),
Mux(ctr === 7.U, 7.U, ctr + 1.U))
}
val doing_reset = RegInit(true.B)
val reset_idx = RegInit(0.U(log2Ceil(nRows).W))
reset_idx := reset_idx + doing_reset
when (reset_idx === (nRows-1).U) { doing_reset := false.B }
class TageEntry extends Bundle {
val valid = Bool() // TODO: Remove this valid bit
val tag = UInt(tagSz.W)
val ctr = UInt(3.W)
}
val tageEntrySz = 1 + tagSz + 3
val (s1_hashed_idx, s1_tag) = compute_tag_and_hash(fetchIdx(io.f1_req_pc), io.f1_req_ghist)
val us = SyncReadMem(nRows, Vec(bankWidth*2, Bool()))
val table = SyncReadMem(nRows, Vec(bankWidth, UInt(tageEntrySz.W)))
us.suggestName(s"tage_u_${histLength}")
table.suggestName(s"tage_table_${histLength}")
val mems = Seq((f"tage_l$histLength", nRows, bankWidth * tageEntrySz))
val s2_tag = RegNext(s1_tag)
val s2_req_rtage = Wire(Vec(bankWidth, new TageEntry))
val s2_req_rus = Wire(Vec(bankWidth*2, Bool()))
val s2_req_rhits = VecInit(s2_req_rtage.map(e => e.valid && e.tag === s2_tag && !doing_reset))
for (w <- 0 until bankWidth) {
// This bit indicates the TAGE table matched here
io.f2_resp(w).valid := s2_req_rhits(w)
io.f2_resp(w).bits.u := Cat(s2_req_rus(w*2+1), s2_req_rus(w*2))
io.f2_resp(w).bits.ctr := s2_req_rtage(w).ctr
}
val clear_u_ctr = RegInit(0.U((log2Ceil(uBitPeriod) + log2Ceil(nRows) + 1).W))
when (doing_reset) { clear_u_ctr := 1.U } .otherwise { clear_u_ctr := clear_u_ctr + 1.U }
val doing_clear_u = clear_u_ctr(log2Ceil(uBitPeriod)-1,0) === 0.U
val clear_u_hi = clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 1.U
val clear_u_lo = clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 0.U
val clear_u_idx = clear_u_ctr >> log2Ceil(uBitPeriod)
val clear_u_mask = VecInit((0 until bankWidth*2) map { i => if (i % 2 == 0) clear_u_lo else clear_u_hi }).asUInt
val (update_idx, update_tag) = compute_tag_and_hash(fetchIdx(io.update_pc), io.update_hist)
val update_wdata = Wire(Vec(bankWidth, new TageEntry))
val wen = WireInit(doing_reset || io.update_mask.reduce(_||_))
val rdata = if (singlePorted) table.read(s1_hashed_idx, !wen && io.f1_req_valid) else table.read(s1_hashed_idx, io.f1_req_valid)
when (RegNext(wen) && singlePorted.B) {
s2_req_rtage := 0.U.asTypeOf(Vec(bankWidth, new TageEntry))
} .otherwise {
s2_req_rtage := VecInit(rdata.map(_.asTypeOf(new TageEntry)))
}
when (wen) {
val widx = Mux(doing_reset, reset_idx, update_idx)
val wdata = Mux(doing_reset, VecInit(Seq.fill(bankWidth) { 0.U(tageEntrySz.W) }), VecInit(update_wdata.map(_.asUInt)))
val wmask = Mux(doing_reset, ~(0.U(bankWidth.W)), io.update_mask.asUInt)
table.write(widx, wdata, wmask.asBools)
}
val update_u_mask = VecInit((0 until bankWidth*2) map {i => io.update_u_mask(i / 2)})
val update_u_wen = WireInit(doing_reset || doing_clear_u || update_u_mask.reduce(_||_))
val u_rdata = if (singlePorted) {
us.read(s1_hashed_idx, !update_u_wen && io.f1_req_valid)
} else {
us.read(s1_hashed_idx, io.f1_req_valid)
}
s2_req_rus := u_rdata
when (update_u_wen) {
val widx = Mux(doing_reset, reset_idx, Mux(doing_clear_u, clear_u_idx, update_idx))
val wdata = Mux(doing_reset || doing_clear_u, VecInit(0.U((bankWidth*2).W).asBools), VecInit(io.update_u.asUInt.asBools))
val wmask = Mux(doing_reset, ~(0.U((bankWidth*2).W)), Mux(doing_clear_u, clear_u_mask, update_u_mask.asUInt))
us.write(widx, wdata, wmask.asBools)
}
val wrbypass_tags = Reg(Vec(nWrBypassEntries, UInt(tagSz.W)))
val wrbypass_idxs = Reg(Vec(nWrBypassEntries, UInt(log2Ceil(nRows).W)))
val wrbypass = Reg(Vec(nWrBypassEntries, Vec(bankWidth, UInt(3.W))))
val wrbypass_enq_idx = RegInit(0.U(log2Ceil(nWrBypassEntries).W))
val wrbypass_hits = VecInit((0 until nWrBypassEntries) map { i =>
!doing_reset &&
wrbypass_tags(i) === update_tag &&
wrbypass_idxs(i) === update_idx
})
val wrbypass_hit = wrbypass_hits.reduce(_||_)
val wrbypass_hit_idx = PriorityEncoder(wrbypass_hits)
for (w <- 0 until bankWidth) {
update_wdata(w).ctr := Mux(io.update_alloc(w),
Mux(io.update_taken(w), 4.U,
3.U
),
Mux(wrbypass_hit, inc_ctr(wrbypass(wrbypass_hit_idx)(w), io.update_taken(w)),
inc_ctr(io.update_old_ctr(w), io.update_taken(w))
)
)
update_wdata(w).valid := true.B
update_wdata(w).tag := update_tag
}
when (io.update_mask.reduce(_||_)) {
when (wrbypass_hits.reduce(_||_)) {
wrbypass(wrbypass_hit_idx) := VecInit(update_wdata.map(_.ctr))
} .otherwise {
wrbypass (wrbypass_enq_idx) := VecInit(update_wdata.map(_.ctr))
wrbypass_tags(wrbypass_enq_idx) := update_tag
wrbypass_idxs(wrbypass_enq_idx) := update_idx
wrbypass_enq_idx := WrapInc(wrbypass_enq_idx, nWrBypassEntries)
}
}
}
case class BoomTageParams(
// nSets, histLen, tagSz
tableInfo: Seq[Tuple3[Int, Int, Int]] = Seq(( 128, 2, 7),
( 128, 4, 7),
( 256, 8, 8),
( 256, 16, 8),
( 128, 32, 9),
( 128, 64, 9)),
uBitPeriod: Int = 2048,
singlePorted: Boolean = false
)
class TageBranchPredictorBank(params: BoomTageParams = BoomTageParams())(implicit p: Parameters) extends BranchPredictorBank()(p)
{
val tageUBitPeriod = params.uBitPeriod
val tageNTables = params.tableInfo.size
class TageMeta extends Bundle
{
val provider = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W)))
val alt_differs = Vec(bankWidth, Output(Bool()))
val provider_u = Vec(bankWidth, Output(UInt(2.W)))
val provider_ctr = Vec(bankWidth, Output(UInt(3.W)))
val allocate = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W)))
}
val f3_meta = Wire(new TageMeta)
override val metaSz = f3_meta.asUInt.getWidth
require(metaSz <= bpdMaxMetaLength)
def inc_u(u: UInt, alt_differs: Bool, mispredict: Bool): UInt = {
Mux(!alt_differs, u,
Mux(mispredict, Mux(u === 0.U, 0.U, u - 1.U),
Mux(u === 3.U, 3.U, u + 1.U)))
}
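// Illustrative only: the intended behavior of inc_u above, spelled out.
// alt_differs = 0 -> u unchanged; alt_differs = 1 and mispredict -> saturating decrement toward 0;
// alt_differs = 1 and correct -> saturating increment toward 3.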
val tt = params.tableInfo map {
case (n, l, s) => {
val t = Module(new TageTable(n, s, l, params.uBitPeriod, params.singlePorted))
t.io.f1_req_valid := RegNext(io.f0_valid)
t.io.f1_req_pc := RegNext(bankAlign(io.f0_pc))
t.io.f1_req_ghist := io.f1_ghist
(t, t.mems)
}
}
val tables = tt.map(_._1)
val mems = tt.map(_._2).flatten
val f2_resps = VecInit(tables.map(_.io.f2_resp))
val f3_resps = RegNext(f2_resps)
val s1_update_meta = s1_update.bits.meta.asTypeOf(new TageMeta)
val s1_update_mispredict_mask = UIntToOH(s1_update.bits.cfi_idx.bits) &
Fill(bankWidth, s1_update.bits.cfi_mispredicted)
val s1_update_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, Bool()))))
val s1_update_u_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, UInt(1.W)))))
val s1_update_taken = Wire(Vec(tageNTables, Vec(bankWidth, Bool())))
val s1_update_old_ctr = Wire(Vec(tageNTables, Vec(bankWidth, UInt(3.W))))
val s1_update_alloc = Wire(Vec(tageNTables, Vec(bankWidth, Bool())))
val s1_update_u = Wire(Vec(tageNTables, Vec(bankWidth, UInt(2.W))))
s1_update_taken := DontCare
s1_update_old_ctr := DontCare
s1_update_alloc := DontCare
s1_update_u := DontCare
for (w <- 0 until bankWidth) {
var s2_provided = false.B
var s2_provider = 0.U
var s2_alt_provided = false.B
var s2_alt_provider = 0.U
for (i <- 0 until tageNTables) {
val hit = f2_resps(i)(w).valid
s2_alt_provided = s2_alt_provided || (s2_provided && hit)
s2_provided = s2_provided || hit
s2_alt_provider = Mux(hit, s2_provider, s2_alt_provider)
s2_provider = Mux(hit, i.U, s2_provider)
}
val s3_provided = RegNext(s2_provided)
val s3_provider = RegNext(s2_provider)
val s3_alt_provided = RegNext(s2_alt_provided)
val s3_alt_provider = RegNext(s2_alt_provider)
val prov = RegNext(f2_resps(s2_provider)(w).bits)
val alt = RegNext(f2_resps(s2_alt_provider)(w).bits)
io.resp.f3(w).taken := Mux(s3_provided,
Mux(prov.ctr === 3.U || prov.ctr === 4.U,
Mux(s3_alt_provided, alt.ctr(2), io.resp_in(0).f3(w).taken),
prov.ctr(2)),
io.resp_in(0).f3(w).taken
)
f3_meta.provider(w).valid := s3_provided
f3_meta.provider(w).bits := s3_provider
f3_meta.alt_differs(w) := s3_alt_provided && alt.ctr(2) =/= io.resp.f3(w).taken
f3_meta.provider_u(w) := prov.u
f3_meta.provider_ctr(w) := prov.ctr
// Create a mask of tables which did not hit our query, contain useless entries,
// and use a longer history than the provider
val allocatable_slots = (
VecInit(f3_resps.map(r => !r(w).valid && r(w).bits.u === 0.U)).asUInt &
~(MaskLower(UIntToOH(f3_meta.provider(w).bits)) & Fill(tageNTables, f3_meta.provider(w).valid))
)
val alloc_lfsr = random.LFSR(tageNTables max 2)
val first_entry = PriorityEncoder(allocatable_slots)
val masked_entry = PriorityEncoder(allocatable_slots & alloc_lfsr)
val alloc_entry = Mux(allocatable_slots(masked_entry),
masked_entry,
first_entry)
f3_meta.allocate(w).valid := allocatable_slots =/= 0.U
f3_meta.allocate(w).bits := alloc_entry
val update_was_taken = (s1_update.bits.cfi_idx.valid &&
(s1_update.bits.cfi_idx.bits === w.U) &&
s1_update.bits.cfi_taken)
when (s1_update.bits.br_mask(w) && s1_update.valid && s1_update.bits.is_commit_update) {
when (s1_update_meta.provider(w).valid) {
val provider = s1_update_meta.provider(w).bits
s1_update_mask(provider)(w) := true.B
s1_update_u_mask(provider)(w) := true.B
val new_u = inc_u(s1_update_meta.provider_u(w),
s1_update_meta.alt_differs(w),
s1_update_mispredict_mask(w))
s1_update_u (provider)(w) := new_u
s1_update_taken (provider)(w) := update_was_taken
s1_update_old_ctr(provider)(w) := s1_update_meta.provider_ctr(w)
s1_update_alloc (provider)(w) := false.B
}
}
}
when (s1_update.valid && s1_update.bits.is_commit_update && s1_update.bits.cfi_mispredicted && s1_update.bits.cfi_idx.valid) {
val idx = s1_update.bits.cfi_idx.bits
val allocate = s1_update_meta.allocate(idx)
when (allocate.valid) {
s1_update_mask (allocate.bits)(idx) := true.B
s1_update_taken(allocate.bits)(idx) := s1_update.bits.cfi_taken
s1_update_alloc(allocate.bits)(idx) := true.B
s1_update_u_mask(allocate.bits)(idx) := true.B
s1_update_u (allocate.bits)(idx) := 0.U
} .otherwise {
val provider = s1_update_meta.provider(idx)
val decr_mask = Mux(provider.valid, ~MaskLower(UIntToOH(provider.bits)), 0.U)
for (i <- 0 until tageNTables) {
when (decr_mask(i)) {
s1_update_u_mask(i)(idx) := true.B
s1_update_u (i)(idx) := 0.U
}
}
}
}
for (i <- 0 until tageNTables) {
for (w <- 0 until bankWidth) {
tables(i).io.update_mask(w) := RegNext(s1_update_mask(i)(w))
tables(i).io.update_taken(w) := RegNext(s1_update_taken(i)(w))
tables(i).io.update_alloc(w) := RegNext(s1_update_alloc(i)(w))
tables(i).io.update_old_ctr(w) := RegNext(s1_update_old_ctr(i)(w))
tables(i).io.update_u_mask(w) := RegNext(s1_update_u_mask(i)(w))
tables(i).io.update_u(w) := RegNext(s1_update_u(i)(w))
}
tables(i).io.update_pc := RegNext(s1_update.bits.pc)
tables(i).io.update_hist := RegNext(s1_update.bits.ghist)
}
//io.f3_meta := Cat(f3_meta.asUInt, micro.io.f3_meta(micro.metaSz-1,0), base.io.f3_meta(base.metaSz-1, 0))
io.f3_meta := f3_meta.asUInt
}
| module tage_u_2_0( // @[tage.scala:89:27]
input [6:0] R0_addr,
input R0_en,
input R0_clk,
output [7:0] R0_data,
input [6:0] W0_addr,
input W0_en,
input W0_clk,
input [7:0] W0_data,
input [7:0] W0_mask
);
tage_u_2_ext tage_u_2_ext ( // @[tage.scala:89:27]
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data),
.W0_mask (W0_mask)
); // @[tage.scala:89:27]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
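// Illustrative only (not part of the original file): a minimal sketch of how the wrappers
// above are typically used, assuming it sits in the same package so AsyncResetShiftReg is
// in scope. ExampleSynchronizer and its port names are invented for this sketch.
class ExampleSynchronizer extends Module {
  val io = IO(new Bundle {
    val asyncIn = Input(Bool())
    val syncedOut = Output(Bool())
  })
  // Three asynchronously reset flops in series, reset value 0, named "example_sync_*".
  io.syncedOut := AsyncResetShiftReg(io.asyncIn, depth = 3, init = 0, name = Some("example_sync"))
}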
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
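// Illustrative only: the default AsyncQueueParams() above gives a depth-8, 3-flop-synchronizer,
// reset-safe crossing; AsyncQueueParams(narrow = true) additionally moves the read mux to the
// source side of the crossing, trading a sink-to-source combinational path for fewer level shifters.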
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
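// Illustrative only (not part of the original file): a plain-Int model of the binary-to-Gray
// step above (gray = bin ^ (bin >> 1)). Consecutive counter values differ in exactly one bit
// once Gray-coded, which is what makes the widx/ridx pointers safe to synchronize across the
// crossing below. GrayCodeModel is an invented name.
object GrayCodeModel {
  def toGray(bin: Int): Int = bin ^ (bin >>> 1)
  // (0 until 8).map(toGray) == Seq(0, 1, 3, 2, 6, 7, 5, 4)
}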
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because a dequeue can occur on the receiving side, reset can
// then be allowed to happen, and the write side cannot know that the dequeue
// occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected valued is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_160( // @[AsyncQueue.scala:58:7]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in = 1'h1; // @[ShiftReg.scala:45:23]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_175 io_out_source_valid_0 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
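// Illustrative only (not part of the original API): a plain-Int model of the addWrap/subWrap
// contracts above, assuming both operands are already < n so a single wrap suffices.
// e.g. addWrapModel(5, 4, 7) == 2 and subWrapModel(1, 3, 7) == 5. Names are invented.
def addWrapModel(a: Int, b: Int, n: Int): Int = { val z = a + b; if (z >= n) z - n else z }
def subWrapModel(a: Int, b: Int, n: Int): Int = { val z = a - b; if (z < 0) z + n else z }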
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
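// Illustrative only (not part of the original util package): a plain-Int model of the leftOR
// fill above, restricted to the given width. leftORModel is an invented name.
def leftORModel(x: Int, width: Int): Int = {
  val mask = (1 << width) - 1
  var acc = x & mask
  var s = 1
  while (s < width) { acc |= (acc << s) & mask; s += s }
  acc
}
// e.g. leftORModel(0x08, 8) == 0xF8: every bit at or above the lowest set bit ends up set.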
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
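// Illustrative only: unlike Seq.groupBy, the helper above preserves first-seen key order, e.g.
// groupByIntoSeq(Seq("bb", "a", "cc", "d"))(_.length) == List(2 -> List("bb", "cc"), 1 -> List("a", "d"))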
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
| module OptimizationBarrier_TLBEntryData_16( // @[package.scala:267:30]
input clock, // @[package.scala:267:30]
input reset, // @[package.scala:267:30]
input [19:0] io_x_ppn, // @[package.scala:268:18]
input io_x_u, // @[package.scala:268:18]
input io_x_g, // @[package.scala:268:18]
input io_x_ae_ptw, // @[package.scala:268:18]
input io_x_ae_final, // @[package.scala:268:18]
input io_x_ae_stage2, // @[package.scala:268:18]
input io_x_pf, // @[package.scala:268:18]
input io_x_gf, // @[package.scala:268:18]
input io_x_sw, // @[package.scala:268:18]
input io_x_sx, // @[package.scala:268:18]
input io_x_sr, // @[package.scala:268:18]
input io_x_hw, // @[package.scala:268:18]
input io_x_hx, // @[package.scala:268:18]
input io_x_hr, // @[package.scala:268:18]
input io_x_pw, // @[package.scala:268:18]
input io_x_px, // @[package.scala:268:18]
input io_x_pr, // @[package.scala:268:18]
input io_x_ppp, // @[package.scala:268:18]
input io_x_pal, // @[package.scala:268:18]
input io_x_paa, // @[package.scala:268:18]
input io_x_eff, // @[package.scala:268:18]
input io_x_c, // @[package.scala:268:18]
input io_x_fragmented_superpage, // @[package.scala:268:18]
output io_y_u, // @[package.scala:268:18]
output io_y_ae_ptw, // @[package.scala:268:18]
output io_y_ae_final, // @[package.scala:268:18]
output io_y_ae_stage2, // @[package.scala:268:18]
output io_y_pf, // @[package.scala:268:18]
output io_y_gf, // @[package.scala:268:18]
output io_y_sw, // @[package.scala:268:18]
output io_y_sx, // @[package.scala:268:18]
output io_y_sr, // @[package.scala:268:18]
output io_y_hw, // @[package.scala:268:18]
output io_y_hx, // @[package.scala:268:18]
output io_y_hr, // @[package.scala:268:18]
output io_y_pw, // @[package.scala:268:18]
output io_y_px, // @[package.scala:268:18]
output io_y_pr, // @[package.scala:268:18]
output io_y_ppp, // @[package.scala:268:18]
output io_y_pal, // @[package.scala:268:18]
output io_y_paa, // @[package.scala:268:18]
output io_y_eff, // @[package.scala:268:18]
output io_y_c // @[package.scala:268:18]
);
wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30]
wire io_x_u_0 = io_x_u; // @[package.scala:267:30]
wire io_x_g_0 = io_x_g; // @[package.scala:267:30]
wire io_x_ae_ptw_0 = io_x_ae_ptw; // @[package.scala:267:30]
wire io_x_ae_final_0 = io_x_ae_final; // @[package.scala:267:30]
wire io_x_ae_stage2_0 = io_x_ae_stage2; // @[package.scala:267:30]
wire io_x_pf_0 = io_x_pf; // @[package.scala:267:30]
wire io_x_gf_0 = io_x_gf; // @[package.scala:267:30]
wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30]
wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30]
wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30]
wire io_x_hw_0 = io_x_hw; // @[package.scala:267:30]
wire io_x_hx_0 = io_x_hx; // @[package.scala:267:30]
wire io_x_hr_0 = io_x_hr; // @[package.scala:267:30]
wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30]
wire io_x_px_0 = io_x_px; // @[package.scala:267:30]
wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30]
wire io_x_ppp_0 = io_x_ppp; // @[package.scala:267:30]
wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30]
wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30]
wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30]
wire io_x_c_0 = io_x_c; // @[package.scala:267:30]
wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30]
wire [19:0] io_y_ppn = io_x_ppn_0; // @[package.scala:267:30]
wire io_y_u_0 = io_x_u_0; // @[package.scala:267:30]
wire io_y_g = io_x_g_0; // @[package.scala:267:30]
wire io_y_ae_ptw_0 = io_x_ae_ptw_0; // @[package.scala:267:30]
wire io_y_ae_final_0 = io_x_ae_final_0; // @[package.scala:267:30]
wire io_y_ae_stage2_0 = io_x_ae_stage2_0; // @[package.scala:267:30]
wire io_y_pf_0 = io_x_pf_0; // @[package.scala:267:30]
wire io_y_gf_0 = io_x_gf_0; // @[package.scala:267:30]
wire io_y_sw_0 = io_x_sw_0; // @[package.scala:267:30]
wire io_y_sx_0 = io_x_sx_0; // @[package.scala:267:30]
wire io_y_sr_0 = io_x_sr_0; // @[package.scala:267:30]
wire io_y_hw_0 = io_x_hw_0; // @[package.scala:267:30]
wire io_y_hx_0 = io_x_hx_0; // @[package.scala:267:30]
wire io_y_hr_0 = io_x_hr_0; // @[package.scala:267:30]
wire io_y_pw_0 = io_x_pw_0; // @[package.scala:267:30]
wire io_y_px_0 = io_x_px_0; // @[package.scala:267:30]
wire io_y_pr_0 = io_x_pr_0; // @[package.scala:267:30]
wire io_y_ppp_0 = io_x_ppp_0; // @[package.scala:267:30]
wire io_y_pal_0 = io_x_pal_0; // @[package.scala:267:30]
wire io_y_paa_0 = io_x_paa_0; // @[package.scala:267:30]
wire io_y_eff_0 = io_x_eff_0; // @[package.scala:267:30]
wire io_y_c_0 = io_x_c_0; // @[package.scala:267:30]
wire io_y_fragmented_superpage = io_x_fragmented_superpage_0; // @[package.scala:267:30]
assign io_y_u = io_y_u_0; // @[package.scala:267:30]
assign io_y_ae_ptw = io_y_ae_ptw_0; // @[package.scala:267:30]
assign io_y_ae_final = io_y_ae_final_0; // @[package.scala:267:30]
assign io_y_ae_stage2 = io_y_ae_stage2_0; // @[package.scala:267:30]
assign io_y_pf = io_y_pf_0; // @[package.scala:267:30]
assign io_y_gf = io_y_gf_0; // @[package.scala:267:30]
assign io_y_sw = io_y_sw_0; // @[package.scala:267:30]
assign io_y_sx = io_y_sx_0; // @[package.scala:267:30]
assign io_y_sr = io_y_sr_0; // @[package.scala:267:30]
assign io_y_hw = io_y_hw_0; // @[package.scala:267:30]
assign io_y_hx = io_y_hx_0; // @[package.scala:267:30]
assign io_y_hr = io_y_hr_0; // @[package.scala:267:30]
assign io_y_pw = io_y_pw_0; // @[package.scala:267:30]
assign io_y_px = io_y_px_0; // @[package.scala:267:30]
assign io_y_pr = io_y_pr_0; // @[package.scala:267:30]
assign io_y_ppp = io_y_ppp_0; // @[package.scala:267:30]
assign io_y_pal = io_y_pal_0; // @[package.scala:267:30]
assign io_y_paa = io_y_paa_0; // @[package.scala:267:30]
assign io_y_eff = io_y_eff_0; // @[package.scala:267:30]
assign io_y_c = io_y_c_0; // @[package.scala:267:30]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
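  // Editor's illustrative sketch (not part of the original source): the Seq helpers above let a
  // plain Scala Seq of hardware values be indexed by a UInt (building a mux chain) or rotated.
  // Both helpers below are hypothetical and only meaningful when called from inside a Module.
  private[util] def exampleSelect[T <: Data](sel: UInt, xs: Seq[T]): T = xs(sel)      // mux chain via apply(UInt)
  private[util] def exampleRotateByOne[T <: Data](xs: Seq[T]): Seq[T] = xs.rotate(1)  // static left rotation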
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
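  // Editor's illustrative sketch (not part of the original source): with the implicit class above,
  // a Seq[Bool] combines element-wise much like a UInt. The helper is hypothetical.
  private[util] def exampleAnyCommon(a: Seq[Bool], b: Seq[Bool]): Bool = (a & b).orR // true if any position is set in both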
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
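  // Editor's illustrative sketch (not part of the original source): addWrap/subWrap provide modular
  // pointer arithmetic, e.g. advancing the head of a circular buffer of depth n where n need not be
  // a power of two. The helper is hypothetical and assumes ptr < depth.
  private[util] def exampleAdvancePtr(ptr: UInt, depth: Int): UInt = ptr.addWrap(1.U, depth)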
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
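  // Editor's illustrative sketch (not part of the original source): UIntToOH1 produces a
  // "thermometer" value (the low `count` bits set within a width-bit result), while leftOR/rightOR
  // smear any set bit toward the MSB/LSB respectively. The helper is hypothetical.
  private[util] def exampleLowOnes(count: UInt, width: Int): UInt = UIntToOH1(count, width)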
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
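  // Editor's illustrative sketch (not part of the original source): wrapping a value in
  // OptimizationBarrier routes it through the trivial pass-through module defined above, which
  // keeps synthesis from constant-folding or flattening it. The helper is hypothetical.
  private[util] def exampleBarrier[T <: Data](x: T): T = OptimizationBarrier(x)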
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
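  // Editor's illustrative sketch (not part of the original source): unlike Seq.groupBy, which
  // returns an unordered Map, groupByIntoSeq preserves the first-appearance order of keys, so
  // generated hardware is deterministic from run to run. The helper is hypothetical.
  private[util] def exampleGrouping: Seq[(Char, Seq[String])] =
    groupByIntoSeq(Seq("d0", "a1", "d2"))(_.head) // == Seq('d' -> Seq("d0", "d2"), 'a' -> Seq("a1"))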
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
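  // Editor's illustrative sketch (not part of the original source): a single entry is fanned out
  // to all n instances, while an n-entry Seq is taken per-instance. The helper is hypothetical.
  private[util] def exampleGlobalSetting: Seq[Int] = heterogeneousOrGlobalSetting(Seq(8), 3) // == Seq(8, 8, 8)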
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
| module OptimizationBarrier_EntryData_77( // @[package.scala:267:30]
input clock, // @[package.scala:267:30]
input reset, // @[package.scala:267:30]
input [19:0] io_x_ppn, // @[package.scala:268:18]
input io_x_u, // @[package.scala:268:18]
input io_x_g, // @[package.scala:268:18]
input io_x_ae, // @[package.scala:268:18]
input io_x_sw, // @[package.scala:268:18]
input io_x_sx, // @[package.scala:268:18]
input io_x_sr, // @[package.scala:268:18]
input io_x_pw, // @[package.scala:268:18]
input io_x_px, // @[package.scala:268:18]
input io_x_pr, // @[package.scala:268:18]
input io_x_pal, // @[package.scala:268:18]
input io_x_paa, // @[package.scala:268:18]
input io_x_eff, // @[package.scala:268:18]
input io_x_c, // @[package.scala:268:18]
input io_x_fragmented_superpage, // @[package.scala:268:18]
output [19:0] io_y_ppn, // @[package.scala:268:18]
output io_y_u, // @[package.scala:268:18]
output io_y_g, // @[package.scala:268:18]
output io_y_ae, // @[package.scala:268:18]
output io_y_sw, // @[package.scala:268:18]
output io_y_sx, // @[package.scala:268:18]
output io_y_sr, // @[package.scala:268:18]
output io_y_pw, // @[package.scala:268:18]
output io_y_px, // @[package.scala:268:18]
output io_y_pr, // @[package.scala:268:18]
output io_y_pal, // @[package.scala:268:18]
output io_y_paa, // @[package.scala:268:18]
output io_y_eff, // @[package.scala:268:18]
output io_y_c, // @[package.scala:268:18]
output io_y_fragmented_superpage // @[package.scala:268:18]
);
wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30]
wire io_x_u_0 = io_x_u; // @[package.scala:267:30]
wire io_x_g_0 = io_x_g; // @[package.scala:267:30]
wire io_x_ae_0 = io_x_ae; // @[package.scala:267:30]
wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30]
wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30]
wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30]
wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30]
wire io_x_px_0 = io_x_px; // @[package.scala:267:30]
wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30]
wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30]
wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30]
wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30]
wire io_x_c_0 = io_x_c; // @[package.scala:267:30]
wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30]
wire [19:0] io_y_ppn_0 = io_x_ppn_0; // @[package.scala:267:30]
wire io_y_u_0 = io_x_u_0; // @[package.scala:267:30]
wire io_y_g_0 = io_x_g_0; // @[package.scala:267:30]
wire io_y_ae_0 = io_x_ae_0; // @[package.scala:267:30]
wire io_y_sw_0 = io_x_sw_0; // @[package.scala:267:30]
wire io_y_sx_0 = io_x_sx_0; // @[package.scala:267:30]
wire io_y_sr_0 = io_x_sr_0; // @[package.scala:267:30]
wire io_y_pw_0 = io_x_pw_0; // @[package.scala:267:30]
wire io_y_px_0 = io_x_px_0; // @[package.scala:267:30]
wire io_y_pr_0 = io_x_pr_0; // @[package.scala:267:30]
wire io_y_pal_0 = io_x_pal_0; // @[package.scala:267:30]
wire io_y_paa_0 = io_x_paa_0; // @[package.scala:267:30]
wire io_y_eff_0 = io_x_eff_0; // @[package.scala:267:30]
wire io_y_c_0 = io_x_c_0; // @[package.scala:267:30]
wire io_y_fragmented_superpage_0 = io_x_fragmented_superpage_0; // @[package.scala:267:30]
assign io_y_ppn = io_y_ppn_0; // @[package.scala:267:30]
assign io_y_u = io_y_u_0; // @[package.scala:267:30]
assign io_y_g = io_y_g_0; // @[package.scala:267:30]
assign io_y_ae = io_y_ae_0; // @[package.scala:267:30]
assign io_y_sw = io_y_sw_0; // @[package.scala:267:30]
assign io_y_sx = io_y_sx_0; // @[package.scala:267:30]
assign io_y_sr = io_y_sr_0; // @[package.scala:267:30]
assign io_y_pw = io_y_pw_0; // @[package.scala:267:30]
assign io_y_px = io_y_px_0; // @[package.scala:267:30]
assign io_y_pr = io_y_pr_0; // @[package.scala:267:30]
assign io_y_pal = io_y_pal_0; // @[package.scala:267:30]
assign io_y_paa = io_y_paa_0; // @[package.scala:267:30]
assign io_y_eff = io_y_eff_0; // @[package.scala:267:30]
assign io_y_c = io_y_c_0; // @[package.scala:267:30]
assign io_y_fragmented_superpage = io_y_fragmented_superpage_0; // @[package.scala:267:30]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
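// Editor's illustrative sketch (not part of the original source): the node types above are
// declared inside LazyModules and wired together with the diplomatic binding operators,
// e.g. (schematically, with hypothetical module/val names):
//   val buffer = LazyModule(new TLBuffer())   // adapter: owns a TLAdapterNode
//   val xbar   = LazyModule(new TLXbar())     // nexus:   owns a TLNexusNode
//   xbar.node := client.node                  // bind exactly one known edge
//   manager.node :=* xbar.node                // star binding; arity resolved by diplomacy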
File RegisterRouter.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes}
import freechips.rocketchip.resources.{Device, Resource, ResourceBindings}
import freechips.rocketchip.prci.{NoCrossing}
import freechips.rocketchip.regmapper.{RegField, RegMapper, RegMapperParams, RegMapperInput, RegisterRouter}
import freechips.rocketchip.util.{BundleField, ControlKey, ElaborationArtefacts, GenRegDescsAnno}
import scala.math.min
class TLRegisterRouterExtraBundle(val sourceBits: Int, val sizeBits: Int) extends Bundle {
val source = UInt((sourceBits max 1).W)
val size = UInt((sizeBits max 1).W)
}
case object TLRegisterRouterExtra extends ControlKey[TLRegisterRouterExtraBundle]("tlrr_extra")
case class TLRegisterRouterExtraField(sourceBits: Int, sizeBits: Int) extends BundleField[TLRegisterRouterExtraBundle](TLRegisterRouterExtra, Output(new TLRegisterRouterExtraBundle(sourceBits, sizeBits)), x => {
x.size := 0.U
x.source := 0.U
})
/** TLRegisterNode is a specialized TL SinkNode that encapsulates MMIO registers.
  * It provides functionality for describing and outputting metadata about the registers in several formats.
* It also provides a concrete implementation of a regmap function that will be used
* to wire a map of internal registers associated with this node to the node's interconnect port.
*/
case class TLRegisterNode(
address: Seq[AddressSet],
device: Device,
deviceKey: String = "reg/control",
concurrency: Int = 0,
beatBytes: Int = 4,
undefZero: Boolean = true,
executable: Boolean = false)(
implicit valName: ValName)
extends SinkNode(TLImp)(Seq(TLSlavePortParameters.v1(
Seq(TLSlaveParameters.v1(
address = address,
resources = Seq(Resource(device, deviceKey)),
executable = executable,
supportsGet = TransferSizes(1, beatBytes),
supportsPutPartial = TransferSizes(1, beatBytes),
supportsPutFull = TransferSizes(1, beatBytes),
fifoId = Some(0))), // requests are handled in order
beatBytes = beatBytes,
minLatency = min(concurrency, 1)))) with TLFormatNode // the Queue adds at most one cycle
{
val size = 1 << log2Ceil(1 + address.map(_.max).max - address.map(_.base).min)
require (size >= beatBytes)
address.foreach { case a =>
require (a.widen(size-1).base == address.head.widen(size-1).base,
s"TLRegisterNode addresses (${address}) must be aligned to its size ${size}")
}
// Calling this method causes the matching TL2 bundle to be
// configured to route all requests to the listed RegFields.
def regmap(mapping: RegField.Map*) = {
val (bundleIn, edge) = this.in(0)
val a = bundleIn.a
val d = bundleIn.d
val fields = TLRegisterRouterExtraField(edge.bundle.sourceBits, edge.bundle.sizeBits) +: a.bits.params.echoFields
val params = RegMapperParams(log2Up(size/beatBytes), beatBytes, fields)
val in = Wire(Decoupled(new RegMapperInput(params)))
in.bits.read := a.bits.opcode === TLMessages.Get
in.bits.index := edge.addr_hi(a.bits)
in.bits.data := a.bits.data
in.bits.mask := a.bits.mask
Connectable.waiveUnmatched(in.bits.extra, a.bits.echo) match {
case (lhs, rhs) => lhs :<= rhs
}
val a_extra = in.bits.extra(TLRegisterRouterExtra)
a_extra.source := a.bits.source
a_extra.size := a.bits.size
// Invoke the register map builder
val out = RegMapper(beatBytes, concurrency, undefZero, in, mapping:_*)
// No flow control needed
in.valid := a.valid
a.ready := in.ready
d.valid := out.valid
out.ready := d.ready
// We must restore the size to enable width adapters to work
val d_extra = out.bits.extra(TLRegisterRouterExtra)
d.bits := edge.AccessAck(toSource = d_extra.source, lgSize = d_extra.size)
// avoid a Mux on the data bus by manually overriding two fields
d.bits.data := out.bits.data
Connectable.waiveUnmatched(d.bits.echo, out.bits.extra) match {
case (lhs, rhs) => lhs :<= rhs
}
d.bits.opcode := Mux(out.bits.read, TLMessages.AccessAckData, TLMessages.AccessAck)
// Tie off unused channels
bundleIn.b.valid := false.B
bundleIn.c.ready := true.B
bundleIn.e.ready := true.B
genRegDescsJson(mapping:_*)
}
def genRegDescsJson(mapping: RegField.Map*): Unit = {
// Dump out the register map for documentation purposes.
val base = address.head.base
val baseHex = s"0x${base.toInt.toHexString}"
val name = s"${device.describe(ResourceBindings()).name}.At${baseHex}"
val json = GenRegDescsAnno.serialize(base, name, mapping:_*)
var suffix = 0
while( ElaborationArtefacts.contains(s"${baseHex}.${suffix}.regmap.json")) {
suffix = suffix + 1
}
ElaborationArtefacts.add(s"${baseHex}.${suffix}.regmap.json", json)
val module = Module.currentModule.get.asInstanceOf[RawModule]
GenRegDescsAnno.anno(
module,
base,
mapping:_*)
}
}
/** Mix HasTLControlRegMap into any subclass of RegisterRouter to gain helper functions for attaching a device control register map to TileLink.
* - The intended use case is that controlNode will diplomatically publish a SW-visible device's memory-mapped control registers.
* - Use the clock crossing helper controlXing to externally connect controlNode to a TileLink interconnect.
* - Use the mapping helper function regmap to internally fill out the space of device control registers.
*/
trait HasTLControlRegMap { this: RegisterRouter =>
protected val controlNode = TLRegisterNode(
address = address,
device = device,
deviceKey = "reg/control",
concurrency = concurrency,
beatBytes = beatBytes,
undefZero = undefZero,
executable = executable)
// Externally, this helper should be used to connect the register control port to a bus
val controlXing: TLInwardClockCrossingHelper = this.crossIn(controlNode)
// Backwards-compatibility default node accessor with no clock crossing
lazy val node: TLInwardNode = controlXing(NoCrossing)
// Internally, this function should be used to populate the control port with registers
protected def regmap(mapping: RegField.Map*): Unit = { controlNode.regmap(mapping:_*) }
}
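// Editor's illustrative sketch (not part of the original source): a device mixing in
// HasTLControlRegMap typically fills its control space with (offset -> Seq[RegField]) pairs,
// e.g. (register names hypothetical):
//   val scratch = RegInit(0.U(32.W))
//   regmap(
//     0x00 -> Seq(RegField(32, scratch)),    // read/write scratch register
//     0x04 -> Seq(RegField.r(32, version))   // read-only field backed by a `version` value
//   )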
File MuxLiteral.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.log2Ceil
import scala.reflect.ClassTag
/* MuxLiteral creates a lookup table from a key to a list of values.
* Unlike MuxLookup, the table keys must be exclusive literals.
*/
object MuxLiteral
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: (UInt, T), rest: (UInt, T)*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[(UInt, T)]): T =
MuxTable(index, default, cases.map { case (k, v) => (k.litValue, v) })
}
object MuxSeq
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: T, rest: T*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[T]): T =
MuxTable(index, default, cases.zipWithIndex.map { case (v, i) => (BigInt(i), v) })
}
object MuxTable
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: (BigInt, T), rest: (BigInt, T)*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[(BigInt, T)]): T = {
/* All keys must be >= 0 and distinct */
cases.foreach { case (k, _) => require (k >= 0) }
require (cases.map(_._1).distinct.size == cases.size)
/* Filter out any cases identical to the default */
val simple = cases.filter { case (k, v) => !default.isLit || !v.isLit || v.litValue != default.litValue }
val maxKey = (BigInt(0) +: simple.map(_._1)).max
val endIndex = BigInt(1) << log2Ceil(maxKey+1)
if (simple.isEmpty) {
default
} else if (endIndex <= 2*simple.size) {
/* The dense encoding case uses a Vec */
val table = Array.fill(endIndex.toInt) { default }
simple.foreach { case (k, v) => table(k.toInt) = v }
Mux(index >= endIndex.U, default, VecInit(table)(index))
} else {
/* The sparse encoding case uses switch */
val out = WireDefault(default)
simple.foldLeft(new chisel3.util.SwitchContext(index, None, Set.empty)) { case (acc, (k, v)) =>
acc.is (k.U) { out := v }
}
out
}
}
}
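// Editor's illustrative sketch (not part of the original source): inside a module one might write
//   val a = MuxLiteral(sel, 0.U, 1.U -> 10.U, 3.U -> 30.U) // sel==1 gives 10, sel==3 gives 30, else 0
//   val b = MuxSeq(sel, 0.U, Seq(5.U, 6.U, 7.U))           // sel indexes the Seq positionally, else 0
// MuxTable then picks a dense Vec lookup when the keys are compact and a sparse switch otherwise.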
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
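// Editor's illustrative sketch (not part of the original source): the usual two-phase pattern.
// The LazyModule is constructed while diplomatic parameters are still being negotiated; its
// `module` (a LazyModuleImp) is only elaborated later, once all parameters are known.
// Names below are hypothetical:
//   class ExampleLazy(implicit p: Parameters) extends LazyModule {
//     lazy val module = new LazyModuleImp(this) {
//       val out = IO(Output(UInt(8.W)))
//       out := 42.U
//     }
//   }
//   // elsewhere: val top = LazyModule(new ExampleLazy); ...; Module(top.module)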
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
  * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
  *   Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
  *   Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
  *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
  *   Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
  *   It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
  *   Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
  *   describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
  *   Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
  *   Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
  *   interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
  *   - line `─`: source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
  /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uoParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
    * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
    * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
    * edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
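  // Illustrative sketch (added comment): if an outward binding lands on an EphemeralNode E whose
  // iForward redirects port j to (k, M), then oTrace((j, E, p, s)) recurses once and returns
  // (k, M, p, s); oPorts/iPorts therefore never contain ephemeral endpoints.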
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
  /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
    *
    * If you need access to the edges of a foreign Node, use this method (unlike in/out, it does not create bundles).
    */
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
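  // Usage sketch (added comment): `edges` exposes negotiated parameters without creating hardware,
  // e.g. `outer.node.edges.in.size` is how the InclusiveCache control block below reports the
  // number of banks.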
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: DontCare unconnected forwarded diplomatic signals for compatibility;
    // in the future, add an option that decides whether unconnected signals are allowed in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: DontCare unconnected forwarded diplomatic signals for compatibility;
    // in the future, add an option that decides whether unconnected signals are allowed in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
    * Accessors to the result of negotiation, to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
    * Accessors to the result of negotiation, to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
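  // Usage sketch (added comment, names illustrative): within a LazyModuleImp these accessors yield
  // (bundle, edge) pairs, e.g.
  //   node.out.foreach { case (tl, edge) => tl.a.valid := false.B }
  // Calling them before the parent module's instantiation has begun trips the require above.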
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
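  // Illustrative sketch (added comment): `x :*= y` calls x.bind(y, BIND_STAR); x records BIND_STAR
  // on its inward side while y records the mirrored BIND_QUERY on its outward side, matching the
  // iKnown/oKnown accounting in the star-resolution code above.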
  /** Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File Control.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.regmapper._
import freechips.rocketchip.tilelink._
class InclusiveCacheControl(outer: InclusiveCache, control: InclusiveCacheControlParameters)(implicit p: Parameters) extends LazyModule()(p) {
val ctrlnode = TLRegisterNode(
address = Seq(AddressSet(control.address, InclusiveCacheParameters.L2ControlSize-1)),
device = outer.device,
concurrency = 1, // Only one flush at a time (else need to track who answers)
beatBytes = control.beatBytes)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
val io = IO(new Bundle {
val flush_match = Input(Bool())
val flush_req = Decoupled(UInt(64.W))
val flush_resp = Input(Bool())
})
// Flush directive
val flushInValid = RegInit(false.B)
val flushInAddress = Reg(UInt(64.W))
val flushOutValid = RegInit(false.B)
val flushOutReady = WireInit(init = false.B)
when (flushOutReady) { flushOutValid := false.B }
when (io.flush_resp) { flushOutValid := true.B }
when (io.flush_req.ready) { flushInValid := false.B }
io.flush_req.valid := flushInValid
io.flush_req.bits := flushInAddress
when (!io.flush_match && flushInValid) {
flushInValid := false.B
flushOutValid := true.B
}
val flush32 = RegField.w(32, RegWriteFn((ivalid, oready, data) => {
when (oready) { flushOutReady := true.B }
when (ivalid) { flushInValid := true.B }
when (ivalid && !flushInValid) { flushInAddress := data << 4 }
(!flushInValid, flushOutValid)
}), RegFieldDesc("Flush32", "Flush the physical address equal to the 32-bit written data << 4 from the cache"))
val flush64 = RegField.w(64, RegWriteFn((ivalid, oready, data) => {
when (oready) { flushOutReady := true.B }
when (ivalid) { flushInValid := true.B }
when (ivalid && !flushInValid) { flushInAddress := data }
(!flushInValid, flushOutValid)
}), RegFieldDesc("Flush64", "Flush the phsyical address equal to the 64-bit written data from the cache"))
// Information about the cache configuration
val banksR = RegField.r(8, outer.node.edges.in.size.U, RegFieldDesc("Banks",
"Number of banks in the cache", reset=Some(outer.node.edges.in.size)))
val waysR = RegField.r(8, outer.cache.ways.U, RegFieldDesc("Ways",
"Number of ways per bank", reset=Some(outer.cache.ways)))
val lgSetsR = RegField.r(8, log2Ceil(outer.cache.sets).U, RegFieldDesc("lgSets",
"Base-2 logarithm of the sets per bank", reset=Some(log2Ceil(outer.cache.sets))))
val lgBlockBytesR = RegField.r(8, log2Ceil(outer.cache.blockBytes).U, RegFieldDesc("lgBlockBytes",
"Base-2 logarithm of the bytes per cache block", reset=Some(log2Ceil(outer.cache.blockBytes))))
val regmap = ctrlnode.regmap(
0x000 -> RegFieldGroup("Config", Some("Information about the Cache Configuration"), Seq(banksR, waysR, lgSetsR, lgBlockBytesR)),
0x200 -> (if (control.beatBytes >= 8) Seq(flush64) else Nil),
0x240 -> Seq(flush32)
)
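    // Illustrative note (added comment): the Config group packs banksR | waysR << 8 | lgSetsR << 16 |
    // lgBlockBytesR << 24 into the read-only word at offset 0x000; in the generated Verilog below this
    // appears as the constant 64'h60A0801 (banks = 1, ways = 8, lgSets = 10, lgBlockBytes = 6).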
}
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
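  // Worked example (added comment): with beatBytes = 8, a 4-byte access (lgSize = 2) whose address
  // ends in binary ...100 selects byte lanes 4..7, so mask(address, lgSize) = 8'b1111_0000.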
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
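  // Worked example (added comment): with beatBytes = 8 and a data-carrying message of size = 5
  // (32 bytes), UIntToOH1(5, maxLgSize) = 0b11111 and the shift by log2Ceil(8) = 3 leaves 0b11,
  // so numBeats1 = 3 (a 4-beat burst); messages without data yield 0.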
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
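  // Usage sketch (added comment, names illustrative): a typical multi-beat tracker is
  //   val (a_first, a_last, a_done) = edge.firstlast(tl.a)
  // a_first is high on the first beat of a burst and a_done pulses when the final beat fires.
  // Each call to firstlastHelper instantiates its own counter, so compute the result once per channel.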
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
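  // Usage sketch (added comment, names illustrative): drive channel A from the constructor's return value, e.g.
  //   val (legal, aBits) = edgeOut.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
  //   tl.a.valid := wantRead && legal
  //   tl.a.bits  := aBits
  // `legal` indicates whether a visible manager supports a Get of that size at that address.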
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module InclusiveCacheControl( // @[Control.scala:38:9]
input clock, // @[Control.scala:38:9]
input reset, // @[Control.scala:38:9]
output auto_ctrl_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_ctrl_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_ctrl_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_ctrl_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_ctrl_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [12:0] auto_ctrl_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [25:0] auto_ctrl_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_ctrl_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_ctrl_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_ctrl_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_ctrl_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_ctrl_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_ctrl_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_ctrl_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [12:0] auto_ctrl_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_ctrl_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
input io_flush_match, // @[Control.scala:39:16]
input io_flush_req_ready, // @[Control.scala:39:16]
output io_flush_req_valid, // @[Control.scala:39:16]
output [63:0] io_flush_req_bits, // @[Control.scala:39:16]
input io_flush_resp // @[Control.scala:39:16]
);
wire out_bits_read; // @[RegisterRouter.scala:87:24]
wire [12:0] out_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:87:24]
wire [8:0] in_bits_index; // @[RegisterRouter.scala:73:18]
wire in_bits_read; // @[RegisterRouter.scala:73:18]
wire _out_back_front_q_io_deq_valid; // @[RegisterRouter.scala:87:24]
wire _out_back_front_q_io_deq_bits_read; // @[RegisterRouter.scala:87:24]
wire [8:0] _out_back_front_q_io_deq_bits_index; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_back_front_q_io_deq_bits_data; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_back_front_q_io_deq_bits_mask; // @[RegisterRouter.scala:87:24]
wire auto_ctrl_in_a_valid_0 = auto_ctrl_in_a_valid; // @[Control.scala:38:9]
wire [2:0] auto_ctrl_in_a_bits_opcode_0 = auto_ctrl_in_a_bits_opcode; // @[Control.scala:38:9]
wire [2:0] auto_ctrl_in_a_bits_param_0 = auto_ctrl_in_a_bits_param; // @[Control.scala:38:9]
wire [1:0] auto_ctrl_in_a_bits_size_0 = auto_ctrl_in_a_bits_size; // @[Control.scala:38:9]
wire [12:0] auto_ctrl_in_a_bits_source_0 = auto_ctrl_in_a_bits_source; // @[Control.scala:38:9]
wire [25:0] auto_ctrl_in_a_bits_address_0 = auto_ctrl_in_a_bits_address; // @[Control.scala:38:9]
wire [7:0] auto_ctrl_in_a_bits_mask_0 = auto_ctrl_in_a_bits_mask; // @[Control.scala:38:9]
wire [63:0] auto_ctrl_in_a_bits_data_0 = auto_ctrl_in_a_bits_data; // @[Control.scala:38:9]
wire auto_ctrl_in_a_bits_corrupt_0 = auto_ctrl_in_a_bits_corrupt; // @[Control.scala:38:9]
wire auto_ctrl_in_d_ready_0 = auto_ctrl_in_d_ready; // @[Control.scala:38:9]
wire io_flush_match_0 = io_flush_match; // @[Control.scala:38:9]
wire io_flush_req_ready_0 = io_flush_req_ready; // @[Control.scala:38:9]
wire io_flush_resp_0 = io_flush_resp; // @[Control.scala:38:9]
wire [3:0][63:0] _GEN = '{64'h0, 64'h0, 64'h0, 64'h60A0801};
wire [8:0] out_maskMatch = 9'h1B7; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_13 = 8'h1; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_14 = 8'h1; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_prepend_T = 8'h1; // @[RegisterRouter.scala:87:24]
wire [11:0] out_prepend = 12'h801; // @[RegisterRouter.scala:87:24]
wire [15:0] _out_T_22 = 16'h801; // @[RegisterRouter.scala:87:24]
wire [15:0] _out_T_23 = 16'h801; // @[RegisterRouter.scala:87:24]
wire [15:0] _out_prepend_T_1 = 16'h801; // @[RegisterRouter.scala:87:24]
wire [19:0] out_prepend_1 = 20'hA0801; // @[RegisterRouter.scala:87:24]
wire [23:0] _out_T_31 = 24'hA0801; // @[RegisterRouter.scala:87:24]
wire [23:0] _out_T_32 = 24'hA0801; // @[RegisterRouter.scala:87:24]
wire [23:0] _out_prepend_T_2 = 24'hA0801; // @[RegisterRouter.scala:87:24]
wire [26:0] out_prepend_2 = 27'h60A0801; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_T_40 = 32'h60A0801; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_T_41 = 32'h60A0801; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_T_66 = 32'h0; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_T_67 = 32'h0; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_out_bits_data_WIRE_1_0 = 64'h60A0801; // @[MuxLiteral.scala:49:48]
wire [2:0] ctrlnodeIn_d_bits_d_opcode = 3'h0; // @[Edges.scala:792:17]
wire [63:0] _out_T_53 = 64'h0; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_T_54 = 64'h0; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_out_bits_data_WIRE_1_1 = 64'h0; // @[MuxLiteral.scala:49:48]
wire [63:0] _out_out_bits_data_WIRE_1_2 = 64'h0; // @[MuxLiteral.scala:49:48]
wire [63:0] _out_out_bits_data_WIRE_1_3 = 64'h0; // @[MuxLiteral.scala:49:48]
wire [63:0] ctrlnodeIn_d_bits_d_data = 64'h0; // @[Edges.scala:792:17]
wire auto_ctrl_in_d_bits_sink = 1'h0; // @[Control.scala:38:9]
wire auto_ctrl_in_d_bits_denied = 1'h0; // @[Control.scala:38:9]
wire auto_ctrl_in_d_bits_corrupt = 1'h0; // @[Control.scala:38:9]
wire ctrlnodeIn_d_bits_sink = 1'h0; // @[MixedNode.scala:551:17]
wire ctrlnodeIn_d_bits_denied = 1'h0; // @[MixedNode.scala:551:17]
wire ctrlnodeIn_d_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire _out_rifireMux_T_8 = 1'h0; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_18 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_wifireMux_T_9 = 1'h0; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_19 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_rofireMux_T_8 = 1'h0; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_18 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_wofireMux_T_9 = 1'h0; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_19 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_out_bits_data_T = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_out_bits_data_T_2 = 1'h0; // @[MuxLiteral.scala:49:17]
wire ctrlnodeIn_d_bits_d_sink = 1'h0; // @[Edges.scala:792:17]
wire ctrlnodeIn_d_bits_d_denied = 1'h0; // @[Edges.scala:792:17]
wire ctrlnodeIn_d_bits_d_corrupt = 1'h0; // @[Edges.scala:792:17]
wire [1:0] auto_ctrl_in_d_bits_param = 2'h0; // @[Control.scala:38:9]
wire [1:0] ctrlnodeIn_d_bits_param = 2'h0; // @[MixedNode.scala:551:17]
wire [1:0] ctrlnodeIn_d_bits_d_param = 2'h0; // @[Edges.scala:792:17]
wire ctrlnodeIn_a_ready; // @[MixedNode.scala:551:17]
wire out_rifireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_5 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_rifireMux_out_1 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_9 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_rifireMux_out_2 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_13 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_rifireMux_out_3 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_17 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_rifireMux_WIRE_1 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_rifireMux_WIRE_2 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_rifireMux_WIRE_3 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_rifireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_wifireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_6 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_wifireMux_out_1 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_10 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_wifireMux_WIRE_1 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_rofireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_5 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_rofireMux_out_1 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_9 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_rofireMux_out_2 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_13 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_rofireMux_out_3 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_17 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_rofireMux_WIRE_1 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_rofireMux_WIRE_2 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_rofireMux_WIRE_3 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_rofireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_wofireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_6 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_wofireMux_out_1 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_10 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_wofireMux_WIRE_1 = 1'h1; // @[MuxLiteral.scala:49:48]
wire _out_out_bits_data_WIRE_1 = 1'h1; // @[MuxLiteral.scala:49:48]
wire ctrlnodeIn_a_valid = auto_ctrl_in_a_valid_0; // @[Control.scala:38:9]
wire [2:0] ctrlnodeIn_a_bits_opcode = auto_ctrl_in_a_bits_opcode_0; // @[Control.scala:38:9]
wire [2:0] ctrlnodeIn_a_bits_param = auto_ctrl_in_a_bits_param_0; // @[Control.scala:38:9]
wire [1:0] ctrlnodeIn_a_bits_size = auto_ctrl_in_a_bits_size_0; // @[Control.scala:38:9]
wire [12:0] ctrlnodeIn_a_bits_source = auto_ctrl_in_a_bits_source_0; // @[Control.scala:38:9]
wire [25:0] ctrlnodeIn_a_bits_address = auto_ctrl_in_a_bits_address_0; // @[Control.scala:38:9]
wire [7:0] ctrlnodeIn_a_bits_mask = auto_ctrl_in_a_bits_mask_0; // @[Control.scala:38:9]
wire [63:0] ctrlnodeIn_a_bits_data = auto_ctrl_in_a_bits_data_0; // @[Control.scala:38:9]
wire ctrlnodeIn_a_bits_corrupt = auto_ctrl_in_a_bits_corrupt_0; // @[Control.scala:38:9]
wire ctrlnodeIn_d_ready = auto_ctrl_in_d_ready_0; // @[Control.scala:38:9]
wire ctrlnodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] ctrlnodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] ctrlnodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [12:0] ctrlnodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [63:0] ctrlnodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire auto_ctrl_in_a_ready_0; // @[Control.scala:38:9]
wire [2:0] auto_ctrl_in_d_bits_opcode_0; // @[Control.scala:38:9]
wire [1:0] auto_ctrl_in_d_bits_size_0; // @[Control.scala:38:9]
wire [12:0] auto_ctrl_in_d_bits_source_0; // @[Control.scala:38:9]
wire [63:0] auto_ctrl_in_d_bits_data_0; // @[Control.scala:38:9]
wire auto_ctrl_in_d_valid_0; // @[Control.scala:38:9]
wire io_flush_req_valid_0; // @[Control.scala:38:9]
wire [63:0] io_flush_req_bits_0; // @[Control.scala:38:9]
wire in_ready; // @[RegisterRouter.scala:73:18]
assign auto_ctrl_in_a_ready_0 = ctrlnodeIn_a_ready; // @[Control.scala:38:9]
wire in_valid = ctrlnodeIn_a_valid; // @[RegisterRouter.scala:73:18]
wire [1:0] in_bits_extra_tlrr_extra_size = ctrlnodeIn_a_bits_size; // @[RegisterRouter.scala:73:18]
wire [12:0] in_bits_extra_tlrr_extra_source = ctrlnodeIn_a_bits_source; // @[RegisterRouter.scala:73:18]
wire [7:0] in_bits_mask = ctrlnodeIn_a_bits_mask; // @[RegisterRouter.scala:73:18]
wire [63:0] in_bits_data = ctrlnodeIn_a_bits_data; // @[RegisterRouter.scala:73:18]
wire out_ready = ctrlnodeIn_d_ready; // @[RegisterRouter.scala:87:24]
wire out_valid; // @[RegisterRouter.scala:87:24]
assign auto_ctrl_in_d_valid_0 = ctrlnodeIn_d_valid; // @[Control.scala:38:9]
assign auto_ctrl_in_d_bits_opcode_0 = ctrlnodeIn_d_bits_opcode; // @[Control.scala:38:9]
wire [1:0] ctrlnodeIn_d_bits_d_size; // @[Edges.scala:792:17]
assign auto_ctrl_in_d_bits_size_0 = ctrlnodeIn_d_bits_size; // @[Control.scala:38:9]
wire [12:0] ctrlnodeIn_d_bits_d_source; // @[Edges.scala:792:17]
assign auto_ctrl_in_d_bits_source_0 = ctrlnodeIn_d_bits_source; // @[Control.scala:38:9]
wire [63:0] out_bits_data; // @[RegisterRouter.scala:87:24]
assign auto_ctrl_in_d_bits_data_0 = ctrlnodeIn_d_bits_data; // @[Control.scala:38:9]
reg flushInValid; // @[Control.scala:45:33]
assign io_flush_req_valid_0 = flushInValid; // @[Control.scala:38:9, :45:33]
reg [63:0] flushInAddress; // @[Control.scala:46:29]
assign io_flush_req_bits_0 = flushInAddress; // @[Control.scala:38:9, :46:29]
reg flushOutValid; // @[Control.scala:47:33]
wire flushOutReady; // @[Control.scala:48:34]
wire _out_in_ready_T; // @[RegisterRouter.scala:87:24]
assign ctrlnodeIn_a_ready = in_ready; // @[RegisterRouter.scala:73:18]
wire _in_bits_read_T; // @[RegisterRouter.scala:74:36]
wire out_front_bits_read = in_bits_read; // @[RegisterRouter.scala:73:18, :87:24]
wire [8:0] out_front_bits_index = in_bits_index; // @[RegisterRouter.scala:73:18, :87:24]
wire [63:0] out_front_bits_data = in_bits_data; // @[RegisterRouter.scala:73:18, :87:24]
wire [7:0] out_front_bits_mask = in_bits_mask; // @[RegisterRouter.scala:73:18, :87:24]
wire [12:0] out_front_bits_extra_tlrr_extra_source = in_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:73:18, :87:24]
wire [1:0] out_front_bits_extra_tlrr_extra_size = in_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:73:18, :87:24]
assign _in_bits_read_T = ctrlnodeIn_a_bits_opcode == 3'h4; // @[RegisterRouter.scala:74:36]
assign in_bits_read = _in_bits_read_T; // @[RegisterRouter.scala:73:18, :74:36]
wire [22:0] _in_bits_index_T = ctrlnodeIn_a_bits_address[25:3]; // @[Edges.scala:192:34]
assign in_bits_index = _in_bits_index_T[8:0]; // @[RegisterRouter.scala:73:18, :75:19]
wire _out_out_valid_T; // @[RegisterRouter.scala:87:24]
assign ctrlnodeIn_d_valid = out_valid; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_out_bits_data_T_4; // @[RegisterRouter.scala:87:24]
wire _ctrlnodeIn_d_bits_opcode_T = out_bits_read; // @[RegisterRouter.scala:87:24, :105:25]
assign ctrlnodeIn_d_bits_data = out_bits_data; // @[RegisterRouter.scala:87:24]
assign ctrlnodeIn_d_bits_d_source = out_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:87:24]
wire [1:0] out_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:87:24]
assign ctrlnodeIn_d_bits_d_size = out_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:87:24]
wire _out_front_valid_T; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_T_42 = out_front_bits_data; // @[RegisterRouter.scala:87:24]
wire out_front_ready; // @[RegisterRouter.scala:87:24]
wire out_front_valid; // @[RegisterRouter.scala:87:24]
wire [8:0] out_findex = out_front_bits_index & 9'h1B7; // @[RegisterRouter.scala:87:24]
wire [8:0] out_bindex = _out_back_front_q_io_deq_bits_index & 9'h1B7; // @[RegisterRouter.scala:87:24]
wire _GEN_0 = out_findex == 9'h0; // @[RegisterRouter.scala:87:24]
wire _out_T; // @[RegisterRouter.scala:87:24]
assign _out_T = _GEN_0; // @[RegisterRouter.scala:87:24]
wire _out_T_2; // @[RegisterRouter.scala:87:24]
assign _out_T_2 = _GEN_0; // @[RegisterRouter.scala:87:24]
wire _out_T_4; // @[RegisterRouter.scala:87:24]
assign _out_T_4 = _GEN_0; // @[RegisterRouter.scala:87:24]
wire _GEN_1 = out_bindex == 9'h0; // @[RegisterRouter.scala:87:24]
wire _out_T_1; // @[RegisterRouter.scala:87:24]
assign _out_T_1 = _GEN_1; // @[RegisterRouter.scala:87:24]
wire _out_T_3; // @[RegisterRouter.scala:87:24]
assign _out_T_3 = _GEN_1; // @[RegisterRouter.scala:87:24]
wire _out_T_5; // @[RegisterRouter.scala:87:24]
assign _out_T_5 = _GEN_1; // @[RegisterRouter.scala:87:24]
wire _out_out_bits_data_WIRE_0 = _out_T_1; // @[MuxLiteral.scala:49:48]
wire _out_out_bits_data_WIRE_2 = _out_T_3; // @[MuxLiteral.scala:49:48]
wire _out_rifireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_out_bits_data_WIRE_3 = _out_T_5; // @[MuxLiteral.scala:49:48]
wire _out_rifireMux_T_11; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_15; // @[RegisterRouter.scala:87:24]
wire out_rivalid_0; // @[RegisterRouter.scala:87:24]
wire out_rivalid_1; // @[RegisterRouter.scala:87:24]
wire out_rivalid_2; // @[RegisterRouter.scala:87:24]
wire out_rivalid_3; // @[RegisterRouter.scala:87:24]
wire out_rivalid_4; // @[RegisterRouter.scala:87:24]
wire out_rivalid_5; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_12; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_16; // @[RegisterRouter.scala:87:24]
wire out_wivalid_0; // @[RegisterRouter.scala:87:24]
wire out_wivalid_1; // @[RegisterRouter.scala:87:24]
wire out_wivalid_2; // @[RegisterRouter.scala:87:24]
wire out_wivalid_3; // @[RegisterRouter.scala:87:24]
wire out_wivalid_4; // @[RegisterRouter.scala:87:24]
wire out_wivalid_5; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_11; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_15; // @[RegisterRouter.scala:87:24]
wire out_roready_0; // @[RegisterRouter.scala:87:24]
wire out_roready_1; // @[RegisterRouter.scala:87:24]
wire out_roready_2; // @[RegisterRouter.scala:87:24]
wire out_roready_3; // @[RegisterRouter.scala:87:24]
wire out_roready_4; // @[RegisterRouter.scala:87:24]
wire out_roready_5; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_12; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_16; // @[RegisterRouter.scala:87:24]
wire out_woready_0; // @[RegisterRouter.scala:87:24]
wire out_woready_1; // @[RegisterRouter.scala:87:24]
wire out_woready_2; // @[RegisterRouter.scala:87:24]
wire out_woready_3; // @[RegisterRouter.scala:87:24]
wire out_woready_4; // @[RegisterRouter.scala:87:24]
wire out_woready_5; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T = out_front_bits_mask[0]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_1 = out_front_bits_mask[1]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_2 = out_front_bits_mask[2]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_3 = out_front_bits_mask[3]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_4 = out_front_bits_mask[4]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_5 = out_front_bits_mask[5]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_6 = out_front_bits_mask[6]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_7 = out_front_bits_mask[7]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_8 = {8{_out_frontMask_T}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_9 = {8{_out_frontMask_T_1}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_10 = {8{_out_frontMask_T_2}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_11 = {8{_out_frontMask_T_3}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_12 = {8{_out_frontMask_T_4}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_13 = {8{_out_frontMask_T_5}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_14 = {8{_out_frontMask_T_6}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_15 = {8{_out_frontMask_T_7}}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_lo_lo = {_out_frontMask_T_9, _out_frontMask_T_8}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_lo_hi = {_out_frontMask_T_11, _out_frontMask_T_10}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_frontMask_lo = {out_frontMask_lo_hi, out_frontMask_lo_lo}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_hi_lo = {_out_frontMask_T_13, _out_frontMask_T_12}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_hi_hi = {_out_frontMask_T_15, _out_frontMask_T_14}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_frontMask_hi = {out_frontMask_hi_hi, out_frontMask_hi_lo}; // @[RegisterRouter.scala:87:24]
wire [63:0] out_frontMask = {out_frontMask_hi, out_frontMask_lo}; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_rimask_T_4 = out_frontMask; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_wimask_T_4 = out_frontMask; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T = _out_back_front_q_io_deq_bits_mask[0]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_1 = _out_back_front_q_io_deq_bits_mask[1]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_2 = _out_back_front_q_io_deq_bits_mask[2]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_3 = _out_back_front_q_io_deq_bits_mask[3]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_4 = _out_back_front_q_io_deq_bits_mask[4]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_5 = _out_back_front_q_io_deq_bits_mask[5]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_6 = _out_back_front_q_io_deq_bits_mask[6]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_7 = _out_back_front_q_io_deq_bits_mask[7]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_8 = {8{_out_backMask_T}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_9 = {8{_out_backMask_T_1}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_10 = {8{_out_backMask_T_2}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_11 = {8{_out_backMask_T_3}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_12 = {8{_out_backMask_T_4}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_13 = {8{_out_backMask_T_5}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_14 = {8{_out_backMask_T_6}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_15 = {8{_out_backMask_T_7}}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_lo_lo = {_out_backMask_T_9, _out_backMask_T_8}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_lo_hi = {_out_backMask_T_11, _out_backMask_T_10}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_backMask_lo = {out_backMask_lo_hi, out_backMask_lo_lo}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_hi_lo = {_out_backMask_T_13, _out_backMask_T_12}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_hi_hi = {_out_backMask_T_15, _out_backMask_T_14}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_backMask_hi = {out_backMask_hi_hi, out_backMask_hi_lo}; // @[RegisterRouter.scala:87:24]
wire [63:0] out_backMask = {out_backMask_hi, out_backMask_lo}; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_romask_T_4 = out_backMask; // @[RegisterRouter.scala:87:24]
wire [63:0] _out_womask_T_4 = out_backMask; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T = out_frontMask[7:0]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T = out_frontMask[7:0]; // @[RegisterRouter.scala:87:24]
wire out_rimask = |_out_rimask_T; // @[RegisterRouter.scala:87:24]
wire out_wimask = &_out_wimask_T; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T = out_backMask[7:0]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T = out_backMask[7:0]; // @[RegisterRouter.scala:87:24]
wire out_romask = |_out_romask_T; // @[RegisterRouter.scala:87:24]
wire out_womask = &_out_womask_T; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid = out_rivalid_0 & out_rimask; // @[RegisterRouter.scala:87:24]
wire _out_T_7 = out_f_rivalid; // @[RegisterRouter.scala:87:24]
wire out_f_roready = out_roready_0 & out_romask; // @[RegisterRouter.scala:87:24]
wire _out_T_8 = out_f_roready; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid = out_wivalid_0 & out_wimask; // @[RegisterRouter.scala:87:24]
wire out_f_woready = out_woready_0 & out_womask; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_6 = _out_back_front_q_io_deq_bits_data[7:0]; // @[RegisterRouter.scala:87:24]
wire _out_T_9 = ~out_rimask; // @[RegisterRouter.scala:87:24]
wire _out_T_10 = ~out_wimask; // @[RegisterRouter.scala:87:24]
wire _out_T_11 = ~out_romask; // @[RegisterRouter.scala:87:24]
wire _out_T_12 = ~out_womask; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T_1 = out_frontMask[15:8]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T_1 = out_frontMask[15:8]; // @[RegisterRouter.scala:87:24]
wire out_rimask_1 = |_out_rimask_T_1; // @[RegisterRouter.scala:87:24]
wire out_wimask_1 = &_out_wimask_T_1; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T_1 = out_backMask[15:8]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T_1 = out_backMask[15:8]; // @[RegisterRouter.scala:87:24]
wire out_romask_1 = |_out_romask_T_1; // @[RegisterRouter.scala:87:24]
wire out_womask_1 = &_out_womask_T_1; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_1 = out_rivalid_1 & out_rimask_1; // @[RegisterRouter.scala:87:24]
wire _out_T_16 = out_f_rivalid_1; // @[RegisterRouter.scala:87:24]
wire out_f_roready_1 = out_roready_1 & out_romask_1; // @[RegisterRouter.scala:87:24]
wire _out_T_17 = out_f_roready_1; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_1 = out_wivalid_1 & out_wimask_1; // @[RegisterRouter.scala:87:24]
wire out_f_woready_1 = out_woready_1 & out_womask_1; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_15 = _out_back_front_q_io_deq_bits_data[15:8]; // @[RegisterRouter.scala:87:24]
wire _out_T_18 = ~out_rimask_1; // @[RegisterRouter.scala:87:24]
wire _out_T_19 = ~out_wimask_1; // @[RegisterRouter.scala:87:24]
wire _out_T_20 = ~out_romask_1; // @[RegisterRouter.scala:87:24]
wire _out_T_21 = ~out_womask_1; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T_2 = out_frontMask[23:16]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T_2 = out_frontMask[23:16]; // @[RegisterRouter.scala:87:24]
wire out_rimask_2 = |_out_rimask_T_2; // @[RegisterRouter.scala:87:24]
wire out_wimask_2 = &_out_wimask_T_2; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T_2 = out_backMask[23:16]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T_2 = out_backMask[23:16]; // @[RegisterRouter.scala:87:24]
wire out_romask_2 = |_out_romask_T_2; // @[RegisterRouter.scala:87:24]
wire out_womask_2 = &_out_womask_T_2; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_2 = out_rivalid_2 & out_rimask_2; // @[RegisterRouter.scala:87:24]
wire _out_T_25 = out_f_rivalid_2; // @[RegisterRouter.scala:87:24]
wire out_f_roready_2 = out_roready_2 & out_romask_2; // @[RegisterRouter.scala:87:24]
wire _out_T_26 = out_f_roready_2; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_2 = out_wivalid_2 & out_wimask_2; // @[RegisterRouter.scala:87:24]
wire out_f_woready_2 = out_woready_2 & out_womask_2; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_24 = _out_back_front_q_io_deq_bits_data[23:16]; // @[RegisterRouter.scala:87:24]
wire _out_T_27 = ~out_rimask_2; // @[RegisterRouter.scala:87:24]
wire _out_T_28 = ~out_wimask_2; // @[RegisterRouter.scala:87:24]
wire _out_T_29 = ~out_romask_2; // @[RegisterRouter.scala:87:24]
wire _out_T_30 = ~out_womask_2; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_rimask_T_3 = out_frontMask[31:24]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_wimask_T_3 = out_frontMask[31:24]; // @[RegisterRouter.scala:87:24]
wire out_rimask_3 = |_out_rimask_T_3; // @[RegisterRouter.scala:87:24]
wire out_wimask_3 = &_out_wimask_T_3; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_romask_T_3 = out_backMask[31:24]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_womask_T_3 = out_backMask[31:24]; // @[RegisterRouter.scala:87:24]
wire out_romask_3 = |_out_romask_T_3; // @[RegisterRouter.scala:87:24]
wire out_womask_3 = &_out_womask_T_3; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_3 = out_rivalid_3 & out_rimask_3; // @[RegisterRouter.scala:87:24]
wire _out_T_34 = out_f_rivalid_3; // @[RegisterRouter.scala:87:24]
wire out_f_roready_3 = out_roready_3 & out_romask_3; // @[RegisterRouter.scala:87:24]
wire _out_T_35 = out_f_roready_3; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_3 = out_wivalid_3 & out_wimask_3; // @[RegisterRouter.scala:87:24]
wire out_f_woready_3 = out_woready_3 & out_womask_3; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_T_33 = _out_back_front_q_io_deq_bits_data[31:24]; // @[RegisterRouter.scala:87:24]
wire _out_T_36 = ~out_rimask_3; // @[RegisterRouter.scala:87:24]
wire _out_T_37 = ~out_wimask_3; // @[RegisterRouter.scala:87:24]
wire _out_T_38 = ~out_romask_3; // @[RegisterRouter.scala:87:24]
wire _out_T_39 = ~out_womask_3; // @[RegisterRouter.scala:87:24]
wire out_rimask_4 = |_out_rimask_T_4; // @[RegisterRouter.scala:87:24]
wire out_wimask_4 = &_out_wimask_T_4; // @[RegisterRouter.scala:87:24]
wire out_romask_4 = |_out_romask_T_4; // @[RegisterRouter.scala:87:24]
wire out_womask_4 = &_out_womask_T_4; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_4 = out_rivalid_4 & out_rimask_4; // @[RegisterRouter.scala:87:24]
wire out_f_roready_4 = out_roready_4 & out_romask_4; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_4 = out_wivalid_4 & out_wimask_4; // @[RegisterRouter.scala:87:24]
wire out_f_woready_4 = out_woready_4 & out_womask_4; // @[RegisterRouter.scala:87:24]
wire _out_T_43 = ~flushInValid; // @[Control.scala:45:33, :71:23]
wire _out_T_44 = out_f_wivalid_4 & _out_T_43; // @[RegisterRouter.scala:87:24]
wire out_f_wiready = ~flushInValid; // @[Control.scala:45:33, :71:23, :72:8]
wire _out_T_45 = out_f_wivalid_4 & out_f_wiready; // @[RegisterRouter.scala:87:24]
wire _out_T_46 = flushOutValid & out_f_woready_4; // @[RegisterRouter.scala:87:24]
wire _out_T_47 = ~out_rimask_4; // @[RegisterRouter.scala:87:24]
wire _out_T_48 = ~out_wimask_4; // @[RegisterRouter.scala:87:24]
wire _out_T_49 = out_f_wiready | _out_T_48; // @[RegisterRouter.scala:87:24]
wire out_wifireMux_out_2 = _out_T_49; // @[RegisterRouter.scala:87:24]
wire _out_T_50 = ~out_romask_4; // @[RegisterRouter.scala:87:24]
wire _out_T_51 = ~out_womask_4; // @[RegisterRouter.scala:87:24]
wire _out_T_52 = flushOutValid | _out_T_51; // @[RegisterRouter.scala:87:24]
wire out_wofireMux_out_2 = _out_T_52; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_rimask_T_5 = out_frontMask[31:0]; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_wimask_T_5 = out_frontMask[31:0]; // @[RegisterRouter.scala:87:24]
wire out_rimask_5 = |_out_rimask_T_5; // @[RegisterRouter.scala:87:24]
wire out_wimask_5 = &_out_wimask_T_5; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_romask_T_5 = out_backMask[31:0]; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_womask_T_5 = out_backMask[31:0]; // @[RegisterRouter.scala:87:24]
wire out_romask_5 = |_out_romask_T_5; // @[RegisterRouter.scala:87:24]
wire out_womask_5 = &_out_womask_T_5; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid_5 = out_rivalid_5 & out_rimask_5; // @[RegisterRouter.scala:87:24]
wire out_f_roready_5 = out_roready_5 & out_romask_5; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid_5 = out_wivalid_5 & out_wimask_5; // @[RegisterRouter.scala:87:24]
wire out_f_woready_5 = out_woready_5 & out_womask_5; // @[RegisterRouter.scala:87:24]
wire [31:0] _out_T_55 = out_front_bits_data[31:0]; // @[RegisterRouter.scala:87:24]
assign flushOutReady = out_f_woready_5 | out_f_woready_4; // @[RegisterRouter.scala:87:24]
wire _out_T_56 = ~flushInValid; // @[Control.scala:45:33, :64:23, :71:23]
wire _out_T_57 = out_f_wivalid_5 & _out_T_56; // @[RegisterRouter.scala:87:24]
wire [35:0] _out_flushInAddress_T = {_out_T_55, 4'h0}; // @[RegisterRouter.scala:87:24]
wire out_f_wiready_1 = ~flushInValid; // @[Control.scala:45:33, :65:8, :71:23]
wire _out_T_58 = out_f_wivalid_5 & out_f_wiready_1; // @[RegisterRouter.scala:87:24]
wire _out_T_59 = flushOutValid & out_f_woready_5; // @[RegisterRouter.scala:87:24]
wire _out_T_60 = ~out_rimask_5; // @[RegisterRouter.scala:87:24]
wire _out_T_61 = ~out_wimask_5; // @[RegisterRouter.scala:87:24]
wire _out_T_62 = out_f_wiready_1 | _out_T_61; // @[RegisterRouter.scala:87:24]
wire out_wifireMux_out_3 = _out_T_62; // @[RegisterRouter.scala:87:24]
wire _out_T_63 = ~out_romask_5; // @[RegisterRouter.scala:87:24]
wire _out_T_64 = ~out_womask_5; // @[RegisterRouter.scala:87:24]
wire _out_T_65 = flushOutValid | _out_T_64; // @[RegisterRouter.scala:87:24]
wire out_wofireMux_out_3 = _out_T_65; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T = out_front_bits_index[0]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_1 = out_front_bits_index[1]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_2 = out_front_bits_index[2]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_3 = out_front_bits_index[3]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_4 = out_front_bits_index[4]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_5 = out_front_bits_index[5]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_6 = out_front_bits_index[6]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_7 = out_front_bits_index[7]; // @[RegisterRouter.scala:87:24]
wire _out_iindex_T_8 = out_front_bits_index[8]; // @[RegisterRouter.scala:87:24]
wire [1:0] out_iindex = {_out_iindex_T_6, _out_iindex_T_3}; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T = _out_back_front_q_io_deq_bits_index[0]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_1 = _out_back_front_q_io_deq_bits_index[1]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_2 = _out_back_front_q_io_deq_bits_index[2]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_3 = _out_back_front_q_io_deq_bits_index[3]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_4 = _out_back_front_q_io_deq_bits_index[4]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_5 = _out_back_front_q_io_deq_bits_index[5]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_6 = _out_back_front_q_io_deq_bits_index[6]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_7 = _out_back_front_q_io_deq_bits_index[7]; // @[RegisterRouter.scala:87:24]
wire _out_oindex_T_8 = _out_back_front_q_io_deq_bits_index[8]; // @[RegisterRouter.scala:87:24]
wire [1:0] out_oindex = {_out_oindex_T_6, _out_oindex_T_3}; // @[RegisterRouter.scala:87:24]
wire [3:0] _out_frontSel_T = 4'h1 << out_iindex; // @[OneHot.scala:58:35]
wire out_frontSel_0 = _out_frontSel_T[0]; // @[OneHot.scala:58:35]
wire out_frontSel_1 = _out_frontSel_T[1]; // @[OneHot.scala:58:35]
wire out_frontSel_2 = _out_frontSel_T[2]; // @[OneHot.scala:58:35]
wire out_frontSel_3 = _out_frontSel_T[3]; // @[OneHot.scala:58:35]
wire [3:0] _out_backSel_T = 4'h1 << out_oindex; // @[OneHot.scala:58:35]
wire out_backSel_0 = _out_backSel_T[0]; // @[OneHot.scala:58:35]
wire out_backSel_1 = _out_backSel_T[1]; // @[OneHot.scala:58:35]
wire out_backSel_2 = _out_backSel_T[2]; // @[OneHot.scala:58:35]
wire out_backSel_3 = _out_backSel_T[3]; // @[OneHot.scala:58:35]
wire _GEN_2 = in_valid & out_front_ready; // @[RegisterRouter.scala:73:18, :87:24]
wire _out_rifireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_rifireMux_T = _GEN_2; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_wifireMux_T = _GEN_2; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_1 = _out_rifireMux_T & out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_2 = _out_rifireMux_T_1 & out_frontSel_0; // @[RegisterRouter.scala:87:24]
assign _out_rifireMux_T_3 = _out_rifireMux_T_2 & _out_T; // @[RegisterRouter.scala:87:24]
assign out_rivalid_0 = _out_rifireMux_T_3; // @[RegisterRouter.scala:87:24]
assign out_rivalid_1 = _out_rifireMux_T_3; // @[RegisterRouter.scala:87:24]
assign out_rivalid_2 = _out_rifireMux_T_3; // @[RegisterRouter.scala:87:24]
assign out_rivalid_3 = _out_rifireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_4 = ~_out_T; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_6 = _out_rifireMux_T_1 & out_frontSel_1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_7 = _out_rifireMux_T_6; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_10 = _out_rifireMux_T_1 & out_frontSel_2; // @[RegisterRouter.scala:87:24]
assign _out_rifireMux_T_11 = _out_rifireMux_T_10 & _out_T_2; // @[RegisterRouter.scala:87:24]
assign out_rivalid_4 = _out_rifireMux_T_11; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_12 = ~_out_T_2; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_14 = _out_rifireMux_T_1 & out_frontSel_3; // @[RegisterRouter.scala:87:24]
assign _out_rifireMux_T_15 = _out_rifireMux_T_14 & _out_T_4; // @[RegisterRouter.scala:87:24]
assign out_rivalid_5 = _out_rifireMux_T_15; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_16 = ~_out_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_1 = ~out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_2 = _out_wifireMux_T & _out_wifireMux_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_3 = _out_wifireMux_T_2 & out_frontSel_0; // @[RegisterRouter.scala:87:24]
assign _out_wifireMux_T_4 = _out_wifireMux_T_3 & _out_T; // @[RegisterRouter.scala:87:24]
assign out_wivalid_0 = _out_wifireMux_T_4; // @[RegisterRouter.scala:87:24]
assign out_wivalid_1 = _out_wifireMux_T_4; // @[RegisterRouter.scala:87:24]
assign out_wivalid_2 = _out_wifireMux_T_4; // @[RegisterRouter.scala:87:24]
assign out_wivalid_3 = _out_wifireMux_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_5 = ~_out_T; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_7 = _out_wifireMux_T_2 & out_frontSel_1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_8 = _out_wifireMux_T_7; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_11 = _out_wifireMux_T_2 & out_frontSel_2; // @[RegisterRouter.scala:87:24]
assign _out_wifireMux_T_12 = _out_wifireMux_T_11 & _out_T_2; // @[RegisterRouter.scala:87:24]
assign out_wivalid_4 = _out_wifireMux_T_12; // @[RegisterRouter.scala:87:24]
wire out_wifireMux_all = _out_wifireMux_T_12 & _out_T_49; // @[ReduceOthers.scala:47:21]
wire _out_wifireMux_T_13 = ~_out_T_2; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_14 = out_wifireMux_out_2 | _out_wifireMux_T_13; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_WIRE_2 = _out_wifireMux_T_14; // @[MuxLiteral.scala:49:48]
wire _out_wifireMux_T_15 = _out_wifireMux_T_2 & out_frontSel_3; // @[RegisterRouter.scala:87:24]
assign _out_wifireMux_T_16 = _out_wifireMux_T_15 & _out_T_4; // @[RegisterRouter.scala:87:24]
assign out_wivalid_5 = _out_wifireMux_T_16; // @[RegisterRouter.scala:87:24]
wire out_wifireMux_all_1 = _out_wifireMux_T_16 & _out_T_62; // @[ReduceOthers.scala:47:21]
wire _out_wifireMux_T_17 = ~_out_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_18 = out_wifireMux_out_3 | _out_wifireMux_T_17; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_WIRE_3 = _out_wifireMux_T_18; // @[MuxLiteral.scala:49:48]
wire [3:0] _GEN_3 = {{_out_wifireMux_WIRE_3}, {_out_wifireMux_WIRE_2}, {1'h1}, {1'h1}}; // @[MuxLiteral.scala:49:{10,48}]
wire out_wifireMux = _GEN_3[out_iindex]; // @[MuxLiteral.scala:49:10]
wire _GEN_4 = _out_back_front_q_io_deq_valid & out_ready; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_rofireMux_T = _GEN_4; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_wofireMux_T = _GEN_4; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_1 = _out_rofireMux_T & _out_back_front_q_io_deq_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_2 = _out_rofireMux_T_1 & out_backSel_0; // @[RegisterRouter.scala:87:24]
assign _out_rofireMux_T_3 = _out_rofireMux_T_2 & _out_T_1; // @[RegisterRouter.scala:87:24]
assign out_roready_0 = _out_rofireMux_T_3; // @[RegisterRouter.scala:87:24]
assign out_roready_1 = _out_rofireMux_T_3; // @[RegisterRouter.scala:87:24]
assign out_roready_2 = _out_rofireMux_T_3; // @[RegisterRouter.scala:87:24]
assign out_roready_3 = _out_rofireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_4 = ~_out_T_1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_6 = _out_rofireMux_T_1 & out_backSel_1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_7 = _out_rofireMux_T_6; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_10 = _out_rofireMux_T_1 & out_backSel_2; // @[RegisterRouter.scala:87:24]
assign _out_rofireMux_T_11 = _out_rofireMux_T_10 & _out_T_3; // @[RegisterRouter.scala:87:24]
assign out_roready_4 = _out_rofireMux_T_11; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_12 = ~_out_T_3; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_14 = _out_rofireMux_T_1 & out_backSel_3; // @[RegisterRouter.scala:87:24]
assign _out_rofireMux_T_15 = _out_rofireMux_T_14 & _out_T_5; // @[RegisterRouter.scala:87:24]
assign out_roready_5 = _out_rofireMux_T_15; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_16 = ~_out_T_5; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_1 = ~_out_back_front_q_io_deq_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_2 = _out_wofireMux_T & _out_wofireMux_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_3 = _out_wofireMux_T_2 & out_backSel_0; // @[RegisterRouter.scala:87:24]
assign _out_wofireMux_T_4 = _out_wofireMux_T_3 & _out_T_1; // @[RegisterRouter.scala:87:24]
assign out_woready_0 = _out_wofireMux_T_4; // @[RegisterRouter.scala:87:24]
assign out_woready_1 = _out_wofireMux_T_4; // @[RegisterRouter.scala:87:24]
assign out_woready_2 = _out_wofireMux_T_4; // @[RegisterRouter.scala:87:24]
assign out_woready_3 = _out_wofireMux_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_5 = ~_out_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_7 = _out_wofireMux_T_2 & out_backSel_1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_8 = _out_wofireMux_T_7; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_11 = _out_wofireMux_T_2 & out_backSel_2; // @[RegisterRouter.scala:87:24]
assign _out_wofireMux_T_12 = _out_wofireMux_T_11 & _out_T_3; // @[RegisterRouter.scala:87:24]
assign out_woready_4 = _out_wofireMux_T_12; // @[RegisterRouter.scala:87:24]
wire out_wofireMux_all = _out_wofireMux_T_12 & _out_T_52; // @[ReduceOthers.scala:47:21]
wire _out_wofireMux_T_13 = ~_out_T_3; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_14 = out_wofireMux_out_2 | _out_wofireMux_T_13; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_WIRE_2 = _out_wofireMux_T_14; // @[MuxLiteral.scala:49:48]
wire _out_wofireMux_T_15 = _out_wofireMux_T_2 & out_backSel_3; // @[RegisterRouter.scala:87:24]
assign _out_wofireMux_T_16 = _out_wofireMux_T_15 & _out_T_5; // @[RegisterRouter.scala:87:24]
assign out_woready_5 = _out_wofireMux_T_16; // @[RegisterRouter.scala:87:24]
wire out_wofireMux_all_1 = _out_wofireMux_T_16 & _out_T_65; // @[ReduceOthers.scala:47:21]
wire _out_wofireMux_T_17 = ~_out_T_5; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_18 = out_wofireMux_out_3 | _out_wofireMux_T_17; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_WIRE_3 = _out_wofireMux_T_18; // @[MuxLiteral.scala:49:48]
wire [3:0] _GEN_5 = {{_out_wofireMux_WIRE_3}, {_out_wofireMux_WIRE_2}, {1'h1}, {1'h1}}; // @[MuxLiteral.scala:49:{10,48}]
wire out_wofireMux = _GEN_5[out_oindex]; // @[MuxLiteral.scala:49:10]
wire out_iready = out_front_bits_read | out_wifireMux; // @[MuxLiteral.scala:49:10]
wire out_oready = _out_back_front_q_io_deq_bits_read | out_wofireMux; // @[MuxLiteral.scala:49:10]
assign _out_in_ready_T = out_front_ready & out_iready; // @[RegisterRouter.scala:87:24]
assign in_ready = _out_in_ready_T; // @[RegisterRouter.scala:73:18, :87:24]
assign _out_front_valid_T = in_valid & out_iready; // @[RegisterRouter.scala:73:18, :87:24]
assign out_front_valid = _out_front_valid_T; // @[RegisterRouter.scala:87:24]
wire _out_front_q_io_deq_ready_T = out_ready & out_oready; // @[RegisterRouter.scala:87:24]
assign _out_out_valid_T = _out_back_front_q_io_deq_valid & out_oready; // @[RegisterRouter.scala:87:24]
assign out_valid = _out_out_valid_T; // @[RegisterRouter.scala:87:24]
wire [3:0] _GEN_6 = {{_out_out_bits_data_WIRE_3}, {_out_out_bits_data_WIRE_2}, {1'h1}, {_out_out_bits_data_WIRE_0}}; // @[MuxLiteral.scala:49:{10,48}]
wire _out_out_bits_data_T_1 = _GEN_6[out_oindex]; // @[MuxLiteral.scala:49:10]
wire [63:0] _out_out_bits_data_T_3 = _GEN[out_oindex]; // @[MuxLiteral.scala:49:10]
assign _out_out_bits_data_T_4 = _out_out_bits_data_T_1 ? _out_out_bits_data_T_3 : 64'h0; // @[MuxLiteral.scala:49:10]
assign out_bits_data = _out_out_bits_data_T_4; // @[RegisterRouter.scala:87:24]
assign ctrlnodeIn_d_bits_size = ctrlnodeIn_d_bits_d_size; // @[Edges.scala:792:17]
assign ctrlnodeIn_d_bits_source = ctrlnodeIn_d_bits_d_source; // @[Edges.scala:792:17]
assign ctrlnodeIn_d_bits_opcode = {2'h0, _ctrlnodeIn_d_bits_opcode_T}; // @[RegisterRouter.scala:105:{19,25}]
wire _T_1 = ~io_flush_match_0 & flushInValid; // @[Control.scala:38:9, :45:33, :56:{11,27}]
always @(posedge clock) begin // @[Control.scala:38:9]
if (reset) begin // @[Control.scala:38:9]
flushInValid <= 1'h0; // @[Control.scala:45:33]
flushOutValid <= 1'h0; // @[Control.scala:47:33]
end
else begin // @[Control.scala:38:9]
flushInValid <= out_f_wivalid_5 | out_f_wivalid_4 | ~(_T_1 | io_flush_req_ready_0) & flushInValid; // @[RegisterRouter.scala:87:24]
flushOutValid <= _T_1 | io_flush_resp_0 | ~flushOutReady & flushOutValid; // @[Control.scala:38:9, :47:33, :48:34, :50:{26,42}, :51:{26,42}, :56:{27,44}, :58:21]
end
if (_out_T_57) // @[Control.scala:64:20]
flushInAddress <= {28'h0, _out_flushInAddress_T}; // @[Control.scala:46:29, :64:{55,63}]
else if (_out_T_44) // @[Control.scala:71:20]
flushInAddress <= _out_T_42; // @[RegisterRouter.scala:87:24]
end // @[Control.scala:38:9]
TLMonitor_33 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (ctrlnodeIn_a_ready), // @[MixedNode.scala:551:17]
.io_in_a_valid (ctrlnodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_in_a_bits_opcode (ctrlnodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_a_bits_param (ctrlnodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_in_a_bits_size (ctrlnodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_in_a_bits_source (ctrlnodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_in_a_bits_address (ctrlnodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_in_a_bits_mask (ctrlnodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_in_a_bits_data (ctrlnodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_in_a_bits_corrupt (ctrlnodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_d_ready (ctrlnodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_in_d_valid (ctrlnodeIn_d_valid), // @[MixedNode.scala:551:17]
.io_in_d_bits_opcode (ctrlnodeIn_d_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_d_bits_size (ctrlnodeIn_d_bits_size), // @[MixedNode.scala:551:17]
.io_in_d_bits_source (ctrlnodeIn_d_bits_source), // @[MixedNode.scala:551:17]
.io_in_d_bits_data (ctrlnodeIn_d_bits_data) // @[MixedNode.scala:551:17]
); // @[Nodes.scala:27:25]
Queue1_RegMapperInput_i9_m8 out_back_front_q ( // @[RegisterRouter.scala:87:24]
.clock (clock),
.reset (reset),
.io_enq_ready (out_front_ready),
.io_enq_valid (out_front_valid), // @[RegisterRouter.scala:87:24]
.io_enq_bits_read (out_front_bits_read), // @[RegisterRouter.scala:87:24]
.io_enq_bits_index (out_front_bits_index), // @[RegisterRouter.scala:87:24]
.io_enq_bits_data (out_front_bits_data), // @[RegisterRouter.scala:87:24]
.io_enq_bits_mask (out_front_bits_mask), // @[RegisterRouter.scala:87:24]
.io_enq_bits_extra_tlrr_extra_source (out_front_bits_extra_tlrr_extra_source), // @[RegisterRouter.scala:87:24]
.io_enq_bits_extra_tlrr_extra_size (out_front_bits_extra_tlrr_extra_size), // @[RegisterRouter.scala:87:24]
.io_deq_ready (_out_front_q_io_deq_ready_T), // @[RegisterRouter.scala:87:24]
.io_deq_valid (_out_back_front_q_io_deq_valid),
.io_deq_bits_read (_out_back_front_q_io_deq_bits_read),
.io_deq_bits_index (_out_back_front_q_io_deq_bits_index),
.io_deq_bits_data (_out_back_front_q_io_deq_bits_data),
.io_deq_bits_mask (_out_back_front_q_io_deq_bits_mask),
.io_deq_bits_extra_tlrr_extra_source (out_bits_extra_tlrr_extra_source),
.io_deq_bits_extra_tlrr_extra_size (out_bits_extra_tlrr_extra_size)
); // @[RegisterRouter.scala:87:24]
assign out_bits_read = _out_back_front_q_io_deq_bits_read; // @[RegisterRouter.scala:87:24]
assign auto_ctrl_in_a_ready = auto_ctrl_in_a_ready_0; // @[Control.scala:38:9]
assign auto_ctrl_in_d_valid = auto_ctrl_in_d_valid_0; // @[Control.scala:38:9]
assign auto_ctrl_in_d_bits_opcode = auto_ctrl_in_d_bits_opcode_0; // @[Control.scala:38:9]
assign auto_ctrl_in_d_bits_size = auto_ctrl_in_d_bits_size_0; // @[Control.scala:38:9]
assign auto_ctrl_in_d_bits_source = auto_ctrl_in_d_bits_source_0; // @[Control.scala:38:9]
assign auto_ctrl_in_d_bits_data = auto_ctrl_in_d_bits_data_0; // @[Control.scala:38:9]
assign io_flush_req_valid = io_flush_req_valid_0; // @[Control.scala:38:9]
assign io_flush_req_bits = io_flush_req_bits_0; // @[Control.scala:38:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
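// Illustrative usage sketch (the signal names here are hypothetical): a 3-stage
// synchronizer with named stages could be built as
//   val sync = ShiftRegInit(asyncIn, 3, false.B, Some("sync"))
// which chains registers sync_2 -> sync_1 -> sync_0 and returns sync_0.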
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
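// Worked example of the derived parameters, assuming the defaults: with
// AsyncQueueParams(depth = 8, sync = 3), bits = 3 and wires = 8, so the
// gray-coded ridx/widx exchanged across the crossing are (bits + 1) = 4 bits
// wide; setting narrow = true collapses the data path to a single mem wire at
// the cost of the sink-to-source combinational index path described above.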
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
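// Worked example: the register holds the binary count and the gray encoding is
// returned, so binary 0,1,2,3 map to gray 000, 001, 011, 010 (e.g.
// 3'b011 ^ (3'b011 >> 1) = 3'b010). Only one bit changes per increment, which is
// what makes the pointer safe to synchronize bit-by-bit across clock domains.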
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because dequeue can occur on the receiving side,
// then reset allowed to happen, but write side cannot know that dequeue
// occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected valued is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_111( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_128 io_out_source_valid ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in_0), // @[AsyncQueue.scala:58:7]
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File util.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v3.common.{MicroOp}
import boom.v3.exu.{BrUpdateInfo}
/**
* Object to XOR fold an input register of fullLength into a compressedLength.
*/
object Fold
{
def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
val clen = compressedLength
val hlen = fullLength
if (hlen <= clen) {
input
} else {
var res = 0.U(clen.W)
var remaining = input.asUInt
for (i <- 0 to hlen-1 by clen) {
val len = if (i + clen > hlen ) (hlen - i) else clen
require(len > 0)
res = res(clen-1,0) ^ remaining(len-1,0)
remaining = remaining >> len.U
}
res
}
}
}
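// Worked example: folding a 16-bit value into 8 bits XORs the two bytes
// together, e.g. Fold(0xA5C3.U, 8, 16) = 0xA5 ^ 0xC3 = 0x66; inputs whose width
// already fits (hlen <= clen) are passed through unchanged.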
/**
* Object to check if MicroOp was killed due to a branch mispredict.
* Uses "Fast" branch masks
*/
object IsKilledByBranch
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop.br_mask)
}
def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop_mask)
}
}
/**
* Object to return new MicroOp with a new BR mask given a MicroOp mask
* and old BR mask.
*/
object GetNewUopAndBrMask
{
def apply(uop: MicroOp, brupdate: BrUpdateInfo)
(implicit p: Parameters): MicroOp = {
val newuop = WireInit(uop)
newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
newuop
}
}
/**
* Object to return a BR mask given a MicroOp mask and old BR mask.
*/
object GetNewBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = {
return uop.br_mask & ~brupdate.b1.resolve_mask
}
def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = {
return br_mask & ~brupdate.b1.resolve_mask
}
}
object UpdateBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.br_mask := GetNewBrMask(brupdate, uop)
out
}
def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
val out = WireInit(bundle)
out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
out
}
def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = {
val out = WireInit(bundle)
out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
out.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask)
out
}
}
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
def apply(reg_val: UInt, new_bit: Bool): UInt = {
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
val carry = csr(clen-1)
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapAdd
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, amt: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + amt)(log2Ceil(n)-1,0)
} else {
val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
Mux(sum >= n.U,
sum - n.U,
sum)
}
}
}
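// Illustrative usage (the names are hypothetical): for a 6-entry queue,
//   val nextPtr = WrapAdd(enqPtr, 2.U, 6)
// returns 1.U when enqPtr is 5.U because the sum 7 is >= 6; for a power-of-2 n
// the wrap is simply a truncation to the low log2(n) bits.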
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapSub
{
// "n" is the number of increments, so we wrap to n-1.
def apply(value: UInt, amt: Int, n: Int): UInt = {
if (isPow2(n)) {
(value - amt.U)(log2Ceil(n)-1,0)
} else {
val v = Cat(0.U(1.W), value)
val b = Cat(0.U(1.W), amt.U)
Mux(value >= amt.U,
value - amt.U,
n.U - amt.U + value)
}
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === (n-1).U)
Mux(wrap, 0.U, value + 1.U)
}
}
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value - 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === 0.U)
Mux(wrap, (n-1).U, value - 1.U)
}
}
}
/**
* Object to mask off lower bits of a PC to align to a "b"
* Byte boundary.
*/
object AlignPCToBoundary
{
def apply(pc: UInt, b: Int): UInt = {
// Invert for the scenario where pc is wider than b
// (a plain AND with the inverted mask would clear all bits above size(b)).
~(~pc | (b-1).U)
}
}
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
def apply(signal: UInt): UInt = {
val w = signal.getWidth
val out = Cat(signal(w-2,0), signal(w-1))
return out
}
}
/**
* Object to sext a value to a particular length.
*/
object Sext
{
def apply(x: UInt, length: Int): UInt = {
if (x.getWidth == length) return x
else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x)
}
}
/**
* Object to translate from BOOM's special "packed immediate" to a 32b signed immediate
* Asking for U-type gives it shifted up 12 bits.
*/
object ImmGen
{
import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U}
def apply(ip: UInt, isel: UInt): SInt = {
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt
}
}
/**
* Object to get the FP rounding mode out of a packed immediate.
*/
object ImmGenRm { def apply(ip: UInt): UInt = { return ip(2,0) } }
/**
* Object to get the FP function type from a packed immediate.
* Note: only works if !(IS_B or IS_S)
*/
object ImmGenTyp { def apply(ip: UInt): UInt = { return ip(9,8) } }
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
* for a debug assert (nowhere else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
val is_br = (inst(6,0) === "b1100011".U)
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
* Object to return the lowest bit position after the head.
*/
object AgePriorityEncoder
{
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
}
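// Worked example, assuming 8 entries and head = 5: requests at or after the head
// win over wrapped-around ones. If bits 2 and 6 of `in` are set, padded index 6
// is found first and 6 is returned; if only bit 2 is set, the hit is at padded
// index 10 and discarding the MSB yields 2.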
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
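// Worked example: with head = 3 the age order is 3,4,5,6,7,0,1,2, so
// IsOlder(6.U, 1.U, 3.U) is true: (6 < 1) ^ (6 < 3) ^ (1 < 3) = 0 ^ 0 ^ 1 = 1.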
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => in >> i.U).reduce(_|_)
}
}
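// Example: MaskLower("b0100".U(4.W)) = "b0111".U -- every bit at or below the
// highest set bit.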
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_)
}
}
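// Example: MaskUpper("b0100".U(4.W)) = "b1100".U -- left shifts past the width
// are truncated, leaving every bit at or above the lowest set bit.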
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
val n = in(0).size
VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
}
}
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
var mask = in
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
}
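// Worked example: SelectFirstN("b1011".U, 2) yields the one-hot selects
// Vec("b0001".U, "b0010".U) -- the two lowest set bits, each masked off before
// the next pick.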
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
io.out <> io.in
} else {
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
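// Behavioural sketch, assuming n = 4 and k = 2: if only in(1) and in(3) are
// valid they are compacted onto out(0) and out(1) respectively; each input is
// steered to the output numbered by the count of valid inputs before it, and an
// input beyond the first k valid ones sees its ready deasserted.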
/**
* Create a queue that can be killed with a branch kill signal.
* Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq).
*/
class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v3.common.BoomModule()(p)
with boom.v3.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireInit(io.enq.fire)
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop)
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop))
io.deq.bits := out
io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, out.uop)
// For flow queue behavior.
if (flow) {
when (io.empty) {
io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop)
io.deq.bits := io.enq.bits
io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
do_deq := false.B
when (io.deq.ready) { do_enq := false.B }
}
}
private val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
/**
* Take in a Chisel Bool and convert it into a Str
* based on the Chars given
*
* @param c_bool Chisel Bool
* @param trueChar Scala Char if bool is true
* @param falseChar Scala Char if bool is false
* @return UInt ASCII Char for "trueChar" or "falseChar"
*/
def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
Mux(c_bool, Str(trueChar), Str(falseChar))
}
}
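// Example (illustrative, not part of the original source): formatting a Bool with
// printf's %c specifier. The surrounding module is hypothetical and assumes the
// chisel3 imports already present at the top of this file.
class BoolToCharExample extends Module {
  val io = IO(new Bundle { val fire = Input(Bool()) })
  // Prints 'F' while io.fire is high, '-' otherwise, once per cycle.
  printf("fire:%c\n", BoolToChar(io.fire, 'F'))
}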
object CfiTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param cfi_type specific cfi type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(cfi_type: UInt) = {
val strings = Seq("----", "BR ", "JAL ", "JALR")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(cfi_type)
}
}
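// Example (illustrative, not part of the original source): these *ToChars helpers return
// a Vec of per-character Strs, so each character is printed with its own %c. The module
// and the 2-bit cfi encoding below are hypothetical.
class CfiTypePrintExample extends Module {
  val io = IO(new Bundle { val cfi_type = Input(UInt(2.W)) })
  val chars = CfiTypeToChars(io.cfi_type)
  printf("cfi:%c%c%c%c\n", chars(0), chars(1), chars(2), chars(3))
}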
object BpdTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(bpd_type)
}
}
object RobTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(rob_type)
}
}
object XRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(xreg)
}
}
object FPRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(fpreg)
}
}
object BoomCoreStringPrefix
{
/**
   * Add prefix to BOOM strings (currently only adds the tile id)
*
* @param strs list of strings
* @return String combining the list with the prefix per line
*/
def apply(strs: String*)(implicit p: Parameters) = {
val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
strs.map(str => prefix + str + "\n").mkString("")
}
}
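// Example (illustrative, not part of the original source): BoomCoreStringPrefix is
// typically used to build elaboration-time banner strings. The object name and the
// banner contents below are hypothetical.
object ExampleBannerPrinter {
  def banner(implicit p: Parameters): String =
    BoomCoreStringPrefix(
      "==== Example Queue ====",
      "entries : 16",
      "flow    : true")
}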
| module BranchKillableQueue_13( // @[util.scala:448:7]
input clock, // @[util.scala:448:7]
input reset, // @[util.scala:448:7]
output io_enq_ready, // @[util.scala:453:14]
input io_enq_valid, // @[util.scala:453:14]
input [6:0] io_enq_bits_uop_uopc, // @[util.scala:453:14]
input [31:0] io_enq_bits_uop_inst, // @[util.scala:453:14]
input [31:0] io_enq_bits_uop_debug_inst, // @[util.scala:453:14]
input io_enq_bits_uop_is_rvc, // @[util.scala:453:14]
input [33:0] io_enq_bits_uop_debug_pc, // @[util.scala:453:14]
input [2:0] io_enq_bits_uop_iq_type, // @[util.scala:453:14]
input [9:0] io_enq_bits_uop_fu_code, // @[util.scala:453:14]
input [3:0] io_enq_bits_uop_ctrl_br_type, // @[util.scala:453:14]
input [1:0] io_enq_bits_uop_ctrl_op1_sel, // @[util.scala:453:14]
input [2:0] io_enq_bits_uop_ctrl_op2_sel, // @[util.scala:453:14]
input [2:0] io_enq_bits_uop_ctrl_imm_sel, // @[util.scala:453:14]
input [4:0] io_enq_bits_uop_ctrl_op_fcn, // @[util.scala:453:14]
input io_enq_bits_uop_ctrl_fcn_dw, // @[util.scala:453:14]
input [2:0] io_enq_bits_uop_ctrl_csr_cmd, // @[util.scala:453:14]
input io_enq_bits_uop_ctrl_is_load, // @[util.scala:453:14]
input io_enq_bits_uop_ctrl_is_sta, // @[util.scala:453:14]
input io_enq_bits_uop_ctrl_is_std, // @[util.scala:453:14]
input [1:0] io_enq_bits_uop_iw_state, // @[util.scala:453:14]
input io_enq_bits_uop_iw_p1_poisoned, // @[util.scala:453:14]
input io_enq_bits_uop_iw_p2_poisoned, // @[util.scala:453:14]
input io_enq_bits_uop_is_br, // @[util.scala:453:14]
input io_enq_bits_uop_is_jalr, // @[util.scala:453:14]
input io_enq_bits_uop_is_jal, // @[util.scala:453:14]
input io_enq_bits_uop_is_sfb, // @[util.scala:453:14]
input [3:0] io_enq_bits_uop_br_mask, // @[util.scala:453:14]
input [1:0] io_enq_bits_uop_br_tag, // @[util.scala:453:14]
input [3:0] io_enq_bits_uop_ftq_idx, // @[util.scala:453:14]
input io_enq_bits_uop_edge_inst, // @[util.scala:453:14]
input [5:0] io_enq_bits_uop_pc_lob, // @[util.scala:453:14]
input io_enq_bits_uop_taken, // @[util.scala:453:14]
input [19:0] io_enq_bits_uop_imm_packed, // @[util.scala:453:14]
input [11:0] io_enq_bits_uop_csr_addr, // @[util.scala:453:14]
input [5:0] io_enq_bits_uop_rob_idx, // @[util.scala:453:14]
input [3:0] io_enq_bits_uop_ldq_idx, // @[util.scala:453:14]
input [3:0] io_enq_bits_uop_stq_idx, // @[util.scala:453:14]
input [1:0] io_enq_bits_uop_rxq_idx, // @[util.scala:453:14]
input [6:0] io_enq_bits_uop_pdst, // @[util.scala:453:14]
input [6:0] io_enq_bits_uop_prs1, // @[util.scala:453:14]
input [6:0] io_enq_bits_uop_prs2, // @[util.scala:453:14]
input [6:0] io_enq_bits_uop_prs3, // @[util.scala:453:14]
input [3:0] io_enq_bits_uop_ppred, // @[util.scala:453:14]
input io_enq_bits_uop_prs1_busy, // @[util.scala:453:14]
input io_enq_bits_uop_prs2_busy, // @[util.scala:453:14]
input io_enq_bits_uop_prs3_busy, // @[util.scala:453:14]
input io_enq_bits_uop_ppred_busy, // @[util.scala:453:14]
input [6:0] io_enq_bits_uop_stale_pdst, // @[util.scala:453:14]
input io_enq_bits_uop_exception, // @[util.scala:453:14]
input [63:0] io_enq_bits_uop_exc_cause, // @[util.scala:453:14]
input io_enq_bits_uop_bypassable, // @[util.scala:453:14]
input [4:0] io_enq_bits_uop_mem_cmd, // @[util.scala:453:14]
input [1:0] io_enq_bits_uop_mem_size, // @[util.scala:453:14]
input io_enq_bits_uop_mem_signed, // @[util.scala:453:14]
input io_enq_bits_uop_is_fence, // @[util.scala:453:14]
input io_enq_bits_uop_is_fencei, // @[util.scala:453:14]
input io_enq_bits_uop_is_amo, // @[util.scala:453:14]
input io_enq_bits_uop_uses_ldq, // @[util.scala:453:14]
input io_enq_bits_uop_uses_stq, // @[util.scala:453:14]
input io_enq_bits_uop_is_sys_pc2epc, // @[util.scala:453:14]
input io_enq_bits_uop_is_unique, // @[util.scala:453:14]
input io_enq_bits_uop_flush_on_commit, // @[util.scala:453:14]
input io_enq_bits_uop_ldst_is_rs1, // @[util.scala:453:14]
input [5:0] io_enq_bits_uop_ldst, // @[util.scala:453:14]
input [5:0] io_enq_bits_uop_lrs1, // @[util.scala:453:14]
input [5:0] io_enq_bits_uop_lrs2, // @[util.scala:453:14]
input [5:0] io_enq_bits_uop_lrs3, // @[util.scala:453:14]
input io_enq_bits_uop_ldst_val, // @[util.scala:453:14]
input [1:0] io_enq_bits_uop_dst_rtype, // @[util.scala:453:14]
input [1:0] io_enq_bits_uop_lrs1_rtype, // @[util.scala:453:14]
input [1:0] io_enq_bits_uop_lrs2_rtype, // @[util.scala:453:14]
input io_enq_bits_uop_frs3_en, // @[util.scala:453:14]
input io_enq_bits_uop_fp_val, // @[util.scala:453:14]
input io_enq_bits_uop_fp_single, // @[util.scala:453:14]
input io_enq_bits_uop_xcpt_pf_if, // @[util.scala:453:14]
input io_enq_bits_uop_xcpt_ae_if, // @[util.scala:453:14]
input io_enq_bits_uop_xcpt_ma_if, // @[util.scala:453:14]
input io_enq_bits_uop_bp_debug_if, // @[util.scala:453:14]
input io_enq_bits_uop_bp_xcpt_if, // @[util.scala:453:14]
input [1:0] io_enq_bits_uop_debug_fsrc, // @[util.scala:453:14]
input [1:0] io_enq_bits_uop_debug_tsrc, // @[util.scala:453:14]
input [33:0] io_enq_bits_addr, // @[util.scala:453:14]
input [63:0] io_enq_bits_data, // @[util.scala:453:14]
input io_enq_bits_is_hella, // @[util.scala:453:14]
input io_enq_bits_tag_match, // @[util.scala:453:14]
input [1:0] io_enq_bits_old_meta_coh_state, // @[util.scala:453:14]
input [21:0] io_enq_bits_old_meta_tag, // @[util.scala:453:14]
input [1:0] io_enq_bits_way_en, // @[util.scala:453:14]
input [4:0] io_enq_bits_sdq_id, // @[util.scala:453:14]
input io_deq_ready, // @[util.scala:453:14]
output io_deq_valid, // @[util.scala:453:14]
output [6:0] io_deq_bits_uop_uopc, // @[util.scala:453:14]
output [31:0] io_deq_bits_uop_inst, // @[util.scala:453:14]
output [31:0] io_deq_bits_uop_debug_inst, // @[util.scala:453:14]
output io_deq_bits_uop_is_rvc, // @[util.scala:453:14]
output [33:0] io_deq_bits_uop_debug_pc, // @[util.scala:453:14]
output [2:0] io_deq_bits_uop_iq_type, // @[util.scala:453:14]
output [9:0] io_deq_bits_uop_fu_code, // @[util.scala:453:14]
output [3:0] io_deq_bits_uop_ctrl_br_type, // @[util.scala:453:14]
output [1:0] io_deq_bits_uop_ctrl_op1_sel, // @[util.scala:453:14]
output [2:0] io_deq_bits_uop_ctrl_op2_sel, // @[util.scala:453:14]
output [2:0] io_deq_bits_uop_ctrl_imm_sel, // @[util.scala:453:14]
output [4:0] io_deq_bits_uop_ctrl_op_fcn, // @[util.scala:453:14]
output io_deq_bits_uop_ctrl_fcn_dw, // @[util.scala:453:14]
output [2:0] io_deq_bits_uop_ctrl_csr_cmd, // @[util.scala:453:14]
output io_deq_bits_uop_ctrl_is_load, // @[util.scala:453:14]
output io_deq_bits_uop_ctrl_is_sta, // @[util.scala:453:14]
output io_deq_bits_uop_ctrl_is_std, // @[util.scala:453:14]
output [1:0] io_deq_bits_uop_iw_state, // @[util.scala:453:14]
output io_deq_bits_uop_iw_p1_poisoned, // @[util.scala:453:14]
output io_deq_bits_uop_iw_p2_poisoned, // @[util.scala:453:14]
output io_deq_bits_uop_is_br, // @[util.scala:453:14]
output io_deq_bits_uop_is_jalr, // @[util.scala:453:14]
output io_deq_bits_uop_is_jal, // @[util.scala:453:14]
output io_deq_bits_uop_is_sfb, // @[util.scala:453:14]
output [3:0] io_deq_bits_uop_br_mask, // @[util.scala:453:14]
output [1:0] io_deq_bits_uop_br_tag, // @[util.scala:453:14]
output [3:0] io_deq_bits_uop_ftq_idx, // @[util.scala:453:14]
output io_deq_bits_uop_edge_inst, // @[util.scala:453:14]
output [5:0] io_deq_bits_uop_pc_lob, // @[util.scala:453:14]
output io_deq_bits_uop_taken, // @[util.scala:453:14]
output [19:0] io_deq_bits_uop_imm_packed, // @[util.scala:453:14]
output [11:0] io_deq_bits_uop_csr_addr, // @[util.scala:453:14]
output [5:0] io_deq_bits_uop_rob_idx, // @[util.scala:453:14]
output [3:0] io_deq_bits_uop_ldq_idx, // @[util.scala:453:14]
output [3:0] io_deq_bits_uop_stq_idx, // @[util.scala:453:14]
output [1:0] io_deq_bits_uop_rxq_idx, // @[util.scala:453:14]
output [6:0] io_deq_bits_uop_pdst, // @[util.scala:453:14]
output [6:0] io_deq_bits_uop_prs1, // @[util.scala:453:14]
output [6:0] io_deq_bits_uop_prs2, // @[util.scala:453:14]
output [6:0] io_deq_bits_uop_prs3, // @[util.scala:453:14]
output [3:0] io_deq_bits_uop_ppred, // @[util.scala:453:14]
output io_deq_bits_uop_prs1_busy, // @[util.scala:453:14]
output io_deq_bits_uop_prs2_busy, // @[util.scala:453:14]
output io_deq_bits_uop_prs3_busy, // @[util.scala:453:14]
output io_deq_bits_uop_ppred_busy, // @[util.scala:453:14]
output [6:0] io_deq_bits_uop_stale_pdst, // @[util.scala:453:14]
output io_deq_bits_uop_exception, // @[util.scala:453:14]
output [63:0] io_deq_bits_uop_exc_cause, // @[util.scala:453:14]
output io_deq_bits_uop_bypassable, // @[util.scala:453:14]
output [4:0] io_deq_bits_uop_mem_cmd, // @[util.scala:453:14]
output [1:0] io_deq_bits_uop_mem_size, // @[util.scala:453:14]
output io_deq_bits_uop_mem_signed, // @[util.scala:453:14]
output io_deq_bits_uop_is_fence, // @[util.scala:453:14]
output io_deq_bits_uop_is_fencei, // @[util.scala:453:14]
output io_deq_bits_uop_is_amo, // @[util.scala:453:14]
output io_deq_bits_uop_uses_ldq, // @[util.scala:453:14]
output io_deq_bits_uop_uses_stq, // @[util.scala:453:14]
output io_deq_bits_uop_is_sys_pc2epc, // @[util.scala:453:14]
output io_deq_bits_uop_is_unique, // @[util.scala:453:14]
output io_deq_bits_uop_flush_on_commit, // @[util.scala:453:14]
output io_deq_bits_uop_ldst_is_rs1, // @[util.scala:453:14]
output [5:0] io_deq_bits_uop_ldst, // @[util.scala:453:14]
output [5:0] io_deq_bits_uop_lrs1, // @[util.scala:453:14]
output [5:0] io_deq_bits_uop_lrs2, // @[util.scala:453:14]
output [5:0] io_deq_bits_uop_lrs3, // @[util.scala:453:14]
output io_deq_bits_uop_ldst_val, // @[util.scala:453:14]
output [1:0] io_deq_bits_uop_dst_rtype, // @[util.scala:453:14]
output [1:0] io_deq_bits_uop_lrs1_rtype, // @[util.scala:453:14]
output [1:0] io_deq_bits_uop_lrs2_rtype, // @[util.scala:453:14]
output io_deq_bits_uop_frs3_en, // @[util.scala:453:14]
output io_deq_bits_uop_fp_val, // @[util.scala:453:14]
output io_deq_bits_uop_fp_single, // @[util.scala:453:14]
output io_deq_bits_uop_xcpt_pf_if, // @[util.scala:453:14]
output io_deq_bits_uop_xcpt_ae_if, // @[util.scala:453:14]
output io_deq_bits_uop_xcpt_ma_if, // @[util.scala:453:14]
output io_deq_bits_uop_bp_debug_if, // @[util.scala:453:14]
output io_deq_bits_uop_bp_xcpt_if, // @[util.scala:453:14]
output [1:0] io_deq_bits_uop_debug_fsrc, // @[util.scala:453:14]
output [1:0] io_deq_bits_uop_debug_tsrc, // @[util.scala:453:14]
output [33:0] io_deq_bits_addr, // @[util.scala:453:14]
output [63:0] io_deq_bits_data, // @[util.scala:453:14]
output io_deq_bits_is_hella, // @[util.scala:453:14]
output io_deq_bits_tag_match, // @[util.scala:453:14]
output [1:0] io_deq_bits_old_meta_coh_state, // @[util.scala:453:14]
output [21:0] io_deq_bits_old_meta_tag, // @[util.scala:453:14]
output [4:0] io_deq_bits_sdq_id, // @[util.scala:453:14]
output io_empty // @[util.scala:453:14]
);
wire [3:0] out_uop_br_mask; // @[util.scala:506:17]
wire [130:0] _ram_ext_R0_data; // @[util.scala:464:20]
wire io_enq_valid_0 = io_enq_valid; // @[util.scala:448:7]
wire [6:0] io_enq_bits_uop_uopc_0 = io_enq_bits_uop_uopc; // @[util.scala:448:7]
wire [31:0] io_enq_bits_uop_inst_0 = io_enq_bits_uop_inst; // @[util.scala:448:7]
wire [31:0] io_enq_bits_uop_debug_inst_0 = io_enq_bits_uop_debug_inst; // @[util.scala:448:7]
wire io_enq_bits_uop_is_rvc_0 = io_enq_bits_uop_is_rvc; // @[util.scala:448:7]
wire [33:0] io_enq_bits_uop_debug_pc_0 = io_enq_bits_uop_debug_pc; // @[util.scala:448:7]
wire [2:0] io_enq_bits_uop_iq_type_0 = io_enq_bits_uop_iq_type; // @[util.scala:448:7]
wire [9:0] io_enq_bits_uop_fu_code_0 = io_enq_bits_uop_fu_code; // @[util.scala:448:7]
wire [3:0] io_enq_bits_uop_ctrl_br_type_0 = io_enq_bits_uop_ctrl_br_type; // @[util.scala:448:7]
wire [1:0] io_enq_bits_uop_ctrl_op1_sel_0 = io_enq_bits_uop_ctrl_op1_sel; // @[util.scala:448:7]
wire [2:0] io_enq_bits_uop_ctrl_op2_sel_0 = io_enq_bits_uop_ctrl_op2_sel; // @[util.scala:448:7]
wire [2:0] io_enq_bits_uop_ctrl_imm_sel_0 = io_enq_bits_uop_ctrl_imm_sel; // @[util.scala:448:7]
wire [4:0] io_enq_bits_uop_ctrl_op_fcn_0 = io_enq_bits_uop_ctrl_op_fcn; // @[util.scala:448:7]
wire io_enq_bits_uop_ctrl_fcn_dw_0 = io_enq_bits_uop_ctrl_fcn_dw; // @[util.scala:448:7]
wire [2:0] io_enq_bits_uop_ctrl_csr_cmd_0 = io_enq_bits_uop_ctrl_csr_cmd; // @[util.scala:448:7]
wire io_enq_bits_uop_ctrl_is_load_0 = io_enq_bits_uop_ctrl_is_load; // @[util.scala:448:7]
wire io_enq_bits_uop_ctrl_is_sta_0 = io_enq_bits_uop_ctrl_is_sta; // @[util.scala:448:7]
wire io_enq_bits_uop_ctrl_is_std_0 = io_enq_bits_uop_ctrl_is_std; // @[util.scala:448:7]
wire [1:0] io_enq_bits_uop_iw_state_0 = io_enq_bits_uop_iw_state; // @[util.scala:448:7]
wire io_enq_bits_uop_iw_p1_poisoned_0 = io_enq_bits_uop_iw_p1_poisoned; // @[util.scala:448:7]
wire io_enq_bits_uop_iw_p2_poisoned_0 = io_enq_bits_uop_iw_p2_poisoned; // @[util.scala:448:7]
wire io_enq_bits_uop_is_br_0 = io_enq_bits_uop_is_br; // @[util.scala:448:7]
wire io_enq_bits_uop_is_jalr_0 = io_enq_bits_uop_is_jalr; // @[util.scala:448:7]
wire io_enq_bits_uop_is_jal_0 = io_enq_bits_uop_is_jal; // @[util.scala:448:7]
wire io_enq_bits_uop_is_sfb_0 = io_enq_bits_uop_is_sfb; // @[util.scala:448:7]
wire [3:0] io_enq_bits_uop_br_mask_0 = io_enq_bits_uop_br_mask; // @[util.scala:448:7]
wire [1:0] io_enq_bits_uop_br_tag_0 = io_enq_bits_uop_br_tag; // @[util.scala:448:7]
wire [3:0] io_enq_bits_uop_ftq_idx_0 = io_enq_bits_uop_ftq_idx; // @[util.scala:448:7]
wire io_enq_bits_uop_edge_inst_0 = io_enq_bits_uop_edge_inst; // @[util.scala:448:7]
wire [5:0] io_enq_bits_uop_pc_lob_0 = io_enq_bits_uop_pc_lob; // @[util.scala:448:7]
wire io_enq_bits_uop_taken_0 = io_enq_bits_uop_taken; // @[util.scala:448:7]
wire [19:0] io_enq_bits_uop_imm_packed_0 = io_enq_bits_uop_imm_packed; // @[util.scala:448:7]
wire [11:0] io_enq_bits_uop_csr_addr_0 = io_enq_bits_uop_csr_addr; // @[util.scala:448:7]
wire [5:0] io_enq_bits_uop_rob_idx_0 = io_enq_bits_uop_rob_idx; // @[util.scala:448:7]
wire [3:0] io_enq_bits_uop_ldq_idx_0 = io_enq_bits_uop_ldq_idx; // @[util.scala:448:7]
wire [3:0] io_enq_bits_uop_stq_idx_0 = io_enq_bits_uop_stq_idx; // @[util.scala:448:7]
wire [1:0] io_enq_bits_uop_rxq_idx_0 = io_enq_bits_uop_rxq_idx; // @[util.scala:448:7]
wire [6:0] io_enq_bits_uop_pdst_0 = io_enq_bits_uop_pdst; // @[util.scala:448:7]
wire [6:0] io_enq_bits_uop_prs1_0 = io_enq_bits_uop_prs1; // @[util.scala:448:7]
wire [6:0] io_enq_bits_uop_prs2_0 = io_enq_bits_uop_prs2; // @[util.scala:448:7]
wire [6:0] io_enq_bits_uop_prs3_0 = io_enq_bits_uop_prs3; // @[util.scala:448:7]
wire [3:0] io_enq_bits_uop_ppred_0 = io_enq_bits_uop_ppred; // @[util.scala:448:7]
wire io_enq_bits_uop_prs1_busy_0 = io_enq_bits_uop_prs1_busy; // @[util.scala:448:7]
wire io_enq_bits_uop_prs2_busy_0 = io_enq_bits_uop_prs2_busy; // @[util.scala:448:7]
wire io_enq_bits_uop_prs3_busy_0 = io_enq_bits_uop_prs3_busy; // @[util.scala:448:7]
wire io_enq_bits_uop_ppred_busy_0 = io_enq_bits_uop_ppred_busy; // @[util.scala:448:7]
wire [6:0] io_enq_bits_uop_stale_pdst_0 = io_enq_bits_uop_stale_pdst; // @[util.scala:448:7]
wire io_enq_bits_uop_exception_0 = io_enq_bits_uop_exception; // @[util.scala:448:7]
wire [63:0] io_enq_bits_uop_exc_cause_0 = io_enq_bits_uop_exc_cause; // @[util.scala:448:7]
wire io_enq_bits_uop_bypassable_0 = io_enq_bits_uop_bypassable; // @[util.scala:448:7]
wire [4:0] io_enq_bits_uop_mem_cmd_0 = io_enq_bits_uop_mem_cmd; // @[util.scala:448:7]
wire [1:0] io_enq_bits_uop_mem_size_0 = io_enq_bits_uop_mem_size; // @[util.scala:448:7]
wire io_enq_bits_uop_mem_signed_0 = io_enq_bits_uop_mem_signed; // @[util.scala:448:7]
wire io_enq_bits_uop_is_fence_0 = io_enq_bits_uop_is_fence; // @[util.scala:448:7]
wire io_enq_bits_uop_is_fencei_0 = io_enq_bits_uop_is_fencei; // @[util.scala:448:7]
wire io_enq_bits_uop_is_amo_0 = io_enq_bits_uop_is_amo; // @[util.scala:448:7]
wire io_enq_bits_uop_uses_ldq_0 = io_enq_bits_uop_uses_ldq; // @[util.scala:448:7]
wire io_enq_bits_uop_uses_stq_0 = io_enq_bits_uop_uses_stq; // @[util.scala:448:7]
wire io_enq_bits_uop_is_sys_pc2epc_0 = io_enq_bits_uop_is_sys_pc2epc; // @[util.scala:448:7]
wire io_enq_bits_uop_is_unique_0 = io_enq_bits_uop_is_unique; // @[util.scala:448:7]
wire io_enq_bits_uop_flush_on_commit_0 = io_enq_bits_uop_flush_on_commit; // @[util.scala:448:7]
wire io_enq_bits_uop_ldst_is_rs1_0 = io_enq_bits_uop_ldst_is_rs1; // @[util.scala:448:7]
wire [5:0] io_enq_bits_uop_ldst_0 = io_enq_bits_uop_ldst; // @[util.scala:448:7]
wire [5:0] io_enq_bits_uop_lrs1_0 = io_enq_bits_uop_lrs1; // @[util.scala:448:7]
wire [5:0] io_enq_bits_uop_lrs2_0 = io_enq_bits_uop_lrs2; // @[util.scala:448:7]
wire [5:0] io_enq_bits_uop_lrs3_0 = io_enq_bits_uop_lrs3; // @[util.scala:448:7]
wire io_enq_bits_uop_ldst_val_0 = io_enq_bits_uop_ldst_val; // @[util.scala:448:7]
wire [1:0] io_enq_bits_uop_dst_rtype_0 = io_enq_bits_uop_dst_rtype; // @[util.scala:448:7]
wire [1:0] io_enq_bits_uop_lrs1_rtype_0 = io_enq_bits_uop_lrs1_rtype; // @[util.scala:448:7]
wire [1:0] io_enq_bits_uop_lrs2_rtype_0 = io_enq_bits_uop_lrs2_rtype; // @[util.scala:448:7]
wire io_enq_bits_uop_frs3_en_0 = io_enq_bits_uop_frs3_en; // @[util.scala:448:7]
wire io_enq_bits_uop_fp_val_0 = io_enq_bits_uop_fp_val; // @[util.scala:448:7]
wire io_enq_bits_uop_fp_single_0 = io_enq_bits_uop_fp_single; // @[util.scala:448:7]
wire io_enq_bits_uop_xcpt_pf_if_0 = io_enq_bits_uop_xcpt_pf_if; // @[util.scala:448:7]
wire io_enq_bits_uop_xcpt_ae_if_0 = io_enq_bits_uop_xcpt_ae_if; // @[util.scala:448:7]
wire io_enq_bits_uop_xcpt_ma_if_0 = io_enq_bits_uop_xcpt_ma_if; // @[util.scala:448:7]
wire io_enq_bits_uop_bp_debug_if_0 = io_enq_bits_uop_bp_debug_if; // @[util.scala:448:7]
wire io_enq_bits_uop_bp_xcpt_if_0 = io_enq_bits_uop_bp_xcpt_if; // @[util.scala:448:7]
wire [1:0] io_enq_bits_uop_debug_fsrc_0 = io_enq_bits_uop_debug_fsrc; // @[util.scala:448:7]
wire [1:0] io_enq_bits_uop_debug_tsrc_0 = io_enq_bits_uop_debug_tsrc; // @[util.scala:448:7]
wire [33:0] io_enq_bits_addr_0 = io_enq_bits_addr; // @[util.scala:448:7]
wire [63:0] io_enq_bits_data_0 = io_enq_bits_data; // @[util.scala:448:7]
wire io_enq_bits_is_hella_0 = io_enq_bits_is_hella; // @[util.scala:448:7]
wire io_enq_bits_tag_match_0 = io_enq_bits_tag_match; // @[util.scala:448:7]
wire [1:0] io_enq_bits_old_meta_coh_state_0 = io_enq_bits_old_meta_coh_state; // @[util.scala:448:7]
wire [21:0] io_enq_bits_old_meta_tag_0 = io_enq_bits_old_meta_tag; // @[util.scala:448:7]
wire [1:0] io_enq_bits_way_en_0 = io_enq_bits_way_en; // @[util.scala:448:7]
wire [4:0] io_enq_bits_sdq_id_0 = io_enq_bits_sdq_id; // @[util.scala:448:7]
wire io_deq_ready_0 = io_deq_ready; // @[util.scala:448:7]
wire _valids_0_T_2 = 1'h1; // @[util.scala:481:32]
wire _valids_0_T_5 = 1'h1; // @[util.scala:481:72]
wire _valids_1_T_2 = 1'h1; // @[util.scala:481:32]
wire _valids_1_T_5 = 1'h1; // @[util.scala:481:72]
wire _valids_2_T_2 = 1'h1; // @[util.scala:481:32]
wire _valids_2_T_5 = 1'h1; // @[util.scala:481:72]
wire _valids_3_T_2 = 1'h1; // @[util.scala:481:32]
wire _valids_3_T_5 = 1'h1; // @[util.scala:481:72]
wire _valids_4_T_2 = 1'h1; // @[util.scala:481:32]
wire _valids_4_T_5 = 1'h1; // @[util.scala:481:72]
wire _valids_5_T_2 = 1'h1; // @[util.scala:481:32]
wire _valids_5_T_5 = 1'h1; // @[util.scala:481:72]
wire _valids_6_T_2 = 1'h1; // @[util.scala:481:32]
wire _valids_6_T_5 = 1'h1; // @[util.scala:481:72]
wire _valids_7_T_2 = 1'h1; // @[util.scala:481:32]
wire _valids_7_T_5 = 1'h1; // @[util.scala:481:72]
wire _valids_8_T_2 = 1'h1; // @[util.scala:481:32]
wire _valids_8_T_5 = 1'h1; // @[util.scala:481:72]
wire _valids_9_T_2 = 1'h1; // @[util.scala:481:32]
wire _valids_9_T_5 = 1'h1; // @[util.scala:481:72]
wire _valids_10_T_2 = 1'h1; // @[util.scala:481:32]
wire _valids_10_T_5 = 1'h1; // @[util.scala:481:72]
wire _valids_11_T_2 = 1'h1; // @[util.scala:481:32]
wire _valids_11_T_5 = 1'h1; // @[util.scala:481:72]
wire _valids_12_T_2 = 1'h1; // @[util.scala:481:32]
wire _valids_12_T_5 = 1'h1; // @[util.scala:481:72]
wire _valids_13_T_2 = 1'h1; // @[util.scala:481:32]
wire _valids_13_T_5 = 1'h1; // @[util.scala:481:72]
wire _valids_14_T_2 = 1'h1; // @[util.scala:481:32]
wire _valids_14_T_5 = 1'h1; // @[util.scala:481:72]
wire _valids_15_T_2 = 1'h1; // @[util.scala:481:32]
wire _valids_15_T_5 = 1'h1; // @[util.scala:481:72]
wire _io_deq_valid_T_4 = 1'h1; // @[util.scala:509:68]
wire _io_deq_valid_T_7 = 1'h1; // @[util.scala:509:111]
wire [3:0] _uops_0_br_mask_T = 4'hF; // @[util.scala:85:27, :89:23]
wire [3:0] _uops_1_br_mask_T = 4'hF; // @[util.scala:85:27, :89:23]
wire [3:0] _uops_2_br_mask_T = 4'hF; // @[util.scala:85:27, :89:23]
wire [3:0] _uops_3_br_mask_T = 4'hF; // @[util.scala:85:27, :89:23]
wire [3:0] _uops_4_br_mask_T = 4'hF; // @[util.scala:85:27, :89:23]
wire [3:0] _uops_5_br_mask_T = 4'hF; // @[util.scala:85:27, :89:23]
wire [3:0] _uops_6_br_mask_T = 4'hF; // @[util.scala:85:27, :89:23]
wire [3:0] _uops_7_br_mask_T = 4'hF; // @[util.scala:85:27, :89:23]
wire [3:0] _uops_8_br_mask_T = 4'hF; // @[util.scala:85:27, :89:23]
wire [3:0] _uops_9_br_mask_T = 4'hF; // @[util.scala:85:27, :89:23]
wire [3:0] _uops_10_br_mask_T = 4'hF; // @[util.scala:85:27, :89:23]
wire [3:0] _uops_11_br_mask_T = 4'hF; // @[util.scala:85:27, :89:23]
wire [3:0] _uops_12_br_mask_T = 4'hF; // @[util.scala:85:27, :89:23]
wire [3:0] _uops_13_br_mask_T = 4'hF; // @[util.scala:85:27, :89:23]
wire [3:0] _uops_14_br_mask_T = 4'hF; // @[util.scala:85:27, :89:23]
wire [3:0] _uops_15_br_mask_T = 4'hF; // @[util.scala:85:27, :89:23]
wire [3:0] _uops_br_mask_T = 4'hF; // @[util.scala:85:27, :89:23]
wire [3:0] _io_deq_bits_uop_br_mask_T = 4'hF; // @[util.scala:85:27, :89:23]
wire [63:0] io_brupdate_b2_uop_exc_cause = 64'h0; // @[util.scala:448:7, :453:14]
wire [11:0] io_brupdate_b2_uop_csr_addr = 12'h0; // @[util.scala:448:7, :453:14]
wire [19:0] io_brupdate_b2_uop_imm_packed = 20'h0; // @[util.scala:448:7, :453:14]
wire [5:0] io_brupdate_b2_uop_pc_lob = 6'h0; // @[util.scala:448:7, :453:14]
wire [5:0] io_brupdate_b2_uop_rob_idx = 6'h0; // @[util.scala:448:7, :453:14]
wire [5:0] io_brupdate_b2_uop_ldst = 6'h0; // @[util.scala:448:7, :453:14]
wire [5:0] io_brupdate_b2_uop_lrs1 = 6'h0; // @[util.scala:448:7, :453:14]
wire [5:0] io_brupdate_b2_uop_lrs2 = 6'h0; // @[util.scala:448:7, :453:14]
wire [5:0] io_brupdate_b2_uop_lrs3 = 6'h0; // @[util.scala:448:7, :453:14]
wire [4:0] io_brupdate_b2_uop_ctrl_op_fcn = 5'h0; // @[util.scala:448:7, :453:14]
wire [4:0] io_brupdate_b2_uop_mem_cmd = 5'h0; // @[util.scala:448:7, :453:14]
wire [1:0] io_brupdate_b2_uop_ctrl_op1_sel = 2'h0; // @[util.scala:448:7, :453:14]
wire [1:0] io_brupdate_b2_uop_iw_state = 2'h0; // @[util.scala:448:7, :453:14]
wire [1:0] io_brupdate_b2_uop_br_tag = 2'h0; // @[util.scala:448:7, :453:14]
wire [1:0] io_brupdate_b2_uop_rxq_idx = 2'h0; // @[util.scala:448:7, :453:14]
wire [1:0] io_brupdate_b2_uop_mem_size = 2'h0; // @[util.scala:448:7, :453:14]
wire [1:0] io_brupdate_b2_uop_dst_rtype = 2'h0; // @[util.scala:448:7, :453:14]
wire [1:0] io_brupdate_b2_uop_lrs1_rtype = 2'h0; // @[util.scala:448:7, :453:14]
wire [1:0] io_brupdate_b2_uop_lrs2_rtype = 2'h0; // @[util.scala:448:7, :453:14]
wire [1:0] io_brupdate_b2_uop_debug_fsrc = 2'h0; // @[util.scala:448:7, :453:14]
wire [1:0] io_brupdate_b2_uop_debug_tsrc = 2'h0; // @[util.scala:448:7, :453:14]
wire [1:0] io_brupdate_b2_pc_sel = 2'h0; // @[util.scala:448:7, :453:14]
wire [1:0] io_brupdate_b2_target_offset = 2'h0; // @[util.scala:448:7, :453:14]
wire [9:0] io_brupdate_b2_uop_fu_code = 10'h0; // @[util.scala:448:7, :453:14]
wire [2:0] io_brupdate_b2_uop_iq_type = 3'h0; // @[util.scala:448:7, :453:14]
wire [2:0] io_brupdate_b2_uop_ctrl_op2_sel = 3'h0; // @[util.scala:448:7, :453:14]
wire [2:0] io_brupdate_b2_uop_ctrl_imm_sel = 3'h0; // @[util.scala:448:7, :453:14]
wire [2:0] io_brupdate_b2_uop_ctrl_csr_cmd = 3'h0; // @[util.scala:448:7, :453:14]
wire [2:0] io_brupdate_b2_cfi_type = 3'h0; // @[util.scala:448:7, :453:14]
wire [33:0] io_brupdate_b2_uop_debug_pc = 34'h0; // @[util.scala:448:7, :453:14]
wire [33:0] io_brupdate_b2_jalr_target = 34'h0; // @[util.scala:448:7, :453:14]
wire io_brupdate_b2_uop_is_rvc = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_ctrl_fcn_dw = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_ctrl_is_load = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_ctrl_is_sta = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_ctrl_is_std = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_iw_p1_poisoned = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_iw_p2_poisoned = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_is_br = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_is_jalr = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_is_jal = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_is_sfb = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_edge_inst = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_taken = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_prs1_busy = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_prs2_busy = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_prs3_busy = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_ppred_busy = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_exception = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_bypassable = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_mem_signed = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_is_fence = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_is_fencei = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_is_amo = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_uses_ldq = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_uses_stq = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_is_sys_pc2epc = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_is_unique = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_flush_on_commit = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_ldst_is_rs1 = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_ldst_val = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_frs3_en = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_fp_val = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_fp_single = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_xcpt_pf_if = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_xcpt_ae_if = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_xcpt_ma_if = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_bp_debug_if = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_uop_bp_xcpt_if = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_valid = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_mispredict = 1'h0; // @[util.scala:448:7]
wire io_brupdate_b2_taken = 1'h0; // @[util.scala:448:7]
wire io_flush = 1'h0; // @[util.scala:448:7]
wire _valids_WIRE_0 = 1'h0; // @[util.scala:465:32]
wire _valids_WIRE_1 = 1'h0; // @[util.scala:465:32]
wire _valids_WIRE_2 = 1'h0; // @[util.scala:465:32]
wire _valids_WIRE_3 = 1'h0; // @[util.scala:465:32]
wire _valids_WIRE_4 = 1'h0; // @[util.scala:465:32]
wire _valids_WIRE_5 = 1'h0; // @[util.scala:465:32]
wire _valids_WIRE_6 = 1'h0; // @[util.scala:465:32]
wire _valids_WIRE_7 = 1'h0; // @[util.scala:465:32]
wire _valids_WIRE_8 = 1'h0; // @[util.scala:465:32]
wire _valids_WIRE_9 = 1'h0; // @[util.scala:465:32]
wire _valids_WIRE_10 = 1'h0; // @[util.scala:465:32]
wire _valids_WIRE_11 = 1'h0; // @[util.scala:465:32]
wire _valids_WIRE_12 = 1'h0; // @[util.scala:465:32]
wire _valids_WIRE_13 = 1'h0; // @[util.scala:465:32]
wire _valids_WIRE_14 = 1'h0; // @[util.scala:465:32]
wire _valids_WIRE_15 = 1'h0; // @[util.scala:465:32]
wire _valids_0_T_1 = 1'h0; // @[util.scala:118:59]
wire _valids_0_T_4 = 1'h0; // @[util.scala:481:83]
wire _valids_1_T_1 = 1'h0; // @[util.scala:118:59]
wire _valids_1_T_4 = 1'h0; // @[util.scala:481:83]
wire _valids_2_T_1 = 1'h0; // @[util.scala:118:59]
wire _valids_2_T_4 = 1'h0; // @[util.scala:481:83]
wire _valids_3_T_1 = 1'h0; // @[util.scala:118:59]
wire _valids_3_T_4 = 1'h0; // @[util.scala:481:83]
wire _valids_4_T_1 = 1'h0; // @[util.scala:118:59]
wire _valids_4_T_4 = 1'h0; // @[util.scala:481:83]
wire _valids_5_T_1 = 1'h0; // @[util.scala:118:59]
wire _valids_5_T_4 = 1'h0; // @[util.scala:481:83]
wire _valids_6_T_1 = 1'h0; // @[util.scala:118:59]
wire _valids_6_T_4 = 1'h0; // @[util.scala:481:83]
wire _valids_7_T_1 = 1'h0; // @[util.scala:118:59]
wire _valids_7_T_4 = 1'h0; // @[util.scala:481:83]
wire _valids_8_T_1 = 1'h0; // @[util.scala:118:59]
wire _valids_8_T_4 = 1'h0; // @[util.scala:481:83]
wire _valids_9_T_1 = 1'h0; // @[util.scala:118:59]
wire _valids_9_T_4 = 1'h0; // @[util.scala:481:83]
wire _valids_10_T_1 = 1'h0; // @[util.scala:118:59]
wire _valids_10_T_4 = 1'h0; // @[util.scala:481:83]
wire _valids_11_T_1 = 1'h0; // @[util.scala:118:59]
wire _valids_11_T_4 = 1'h0; // @[util.scala:481:83]
wire _valids_12_T_1 = 1'h0; // @[util.scala:118:59]
wire _valids_12_T_4 = 1'h0; // @[util.scala:481:83]
wire _valids_13_T_1 = 1'h0; // @[util.scala:118:59]
wire _valids_13_T_4 = 1'h0; // @[util.scala:481:83]
wire _valids_14_T_1 = 1'h0; // @[util.scala:118:59]
wire _valids_14_T_4 = 1'h0; // @[util.scala:481:83]
wire _valids_15_T_1 = 1'h0; // @[util.scala:118:59]
wire _valids_15_T_4 = 1'h0; // @[util.scala:481:83]
wire _io_deq_valid_T_3 = 1'h0; // @[util.scala:118:59]
wire _io_deq_valid_T_6 = 1'h0; // @[util.scala:509:122]
wire [31:0] io_brupdate_b2_uop_inst = 32'h0; // @[util.scala:448:7, :453:14]
wire [31:0] io_brupdate_b2_uop_debug_inst = 32'h0; // @[util.scala:448:7, :453:14]
wire [6:0] io_brupdate_b2_uop_uopc = 7'h0; // @[util.scala:448:7, :453:14]
wire [6:0] io_brupdate_b2_uop_pdst = 7'h0; // @[util.scala:448:7, :453:14]
wire [6:0] io_brupdate_b2_uop_prs1 = 7'h0; // @[util.scala:448:7, :453:14]
wire [6:0] io_brupdate_b2_uop_prs2 = 7'h0; // @[util.scala:448:7, :453:14]
wire [6:0] io_brupdate_b2_uop_prs3 = 7'h0; // @[util.scala:448:7, :453:14]
wire [6:0] io_brupdate_b2_uop_stale_pdst = 7'h0; // @[util.scala:448:7, :453:14]
wire [3:0] io_brupdate_b1_resolve_mask = 4'h0; // @[util.scala:448:7]
wire [3:0] io_brupdate_b1_mispredict_mask = 4'h0; // @[util.scala:448:7]
wire [3:0] io_brupdate_b2_uop_ctrl_br_type = 4'h0; // @[util.scala:448:7]
wire [3:0] io_brupdate_b2_uop_br_mask = 4'h0; // @[util.scala:448:7]
wire [3:0] io_brupdate_b2_uop_ftq_idx = 4'h0; // @[util.scala:448:7]
wire [3:0] io_brupdate_b2_uop_ldq_idx = 4'h0; // @[util.scala:448:7]
wire [3:0] io_brupdate_b2_uop_stq_idx = 4'h0; // @[util.scala:448:7]
wire [3:0] io_brupdate_b2_uop_ppred = 4'h0; // @[util.scala:448:7]
wire [3:0] _valids_0_T = 4'h0; // @[util.scala:118:51]
wire [3:0] _valids_1_T = 4'h0; // @[util.scala:118:51]
wire [3:0] _valids_2_T = 4'h0; // @[util.scala:118:51]
wire [3:0] _valids_3_T = 4'h0; // @[util.scala:118:51]
wire [3:0] _valids_4_T = 4'h0; // @[util.scala:118:51]
wire [3:0] _valids_5_T = 4'h0; // @[util.scala:118:51]
wire [3:0] _valids_6_T = 4'h0; // @[util.scala:118:51]
wire [3:0] _valids_7_T = 4'h0; // @[util.scala:118:51]
wire [3:0] _valids_8_T = 4'h0; // @[util.scala:118:51]
wire [3:0] _valids_9_T = 4'h0; // @[util.scala:118:51]
wire [3:0] _valids_10_T = 4'h0; // @[util.scala:118:51]
wire [3:0] _valids_11_T = 4'h0; // @[util.scala:118:51]
wire [3:0] _valids_12_T = 4'h0; // @[util.scala:118:51]
wire [3:0] _valids_13_T = 4'h0; // @[util.scala:118:51]
wire [3:0] _valids_14_T = 4'h0; // @[util.scala:118:51]
wire [3:0] _valids_15_T = 4'h0; // @[util.scala:118:51]
wire _io_enq_ready_T; // @[util.scala:504:19]
wire [3:0] _io_deq_valid_T_2 = 4'h0; // @[util.scala:118:51]
wire [3:0] _uops_br_mask_T_1 = io_enq_bits_uop_br_mask_0; // @[util.scala:85:25, :448:7]
wire _io_deq_valid_T_8; // @[util.scala:509:108]
wire [6:0] out_uop_uopc; // @[util.scala:506:17]
wire [31:0] out_uop_inst; // @[util.scala:506:17]
wire [31:0] out_uop_debug_inst; // @[util.scala:506:17]
wire out_uop_is_rvc; // @[util.scala:506:17]
wire [33:0] out_uop_debug_pc; // @[util.scala:506:17]
wire [2:0] out_uop_iq_type; // @[util.scala:506:17]
wire [9:0] out_uop_fu_code; // @[util.scala:506:17]
wire [3:0] out_uop_ctrl_br_type; // @[util.scala:506:17]
wire [1:0] out_uop_ctrl_op1_sel; // @[util.scala:506:17]
wire [2:0] out_uop_ctrl_op2_sel; // @[util.scala:506:17]
wire [2:0] out_uop_ctrl_imm_sel; // @[util.scala:506:17]
wire [4:0] out_uop_ctrl_op_fcn; // @[util.scala:506:17]
wire out_uop_ctrl_fcn_dw; // @[util.scala:506:17]
wire [2:0] out_uop_ctrl_csr_cmd; // @[util.scala:506:17]
wire out_uop_ctrl_is_load; // @[util.scala:506:17]
wire out_uop_ctrl_is_sta; // @[util.scala:506:17]
wire out_uop_ctrl_is_std; // @[util.scala:506:17]
wire [1:0] out_uop_iw_state; // @[util.scala:506:17]
wire out_uop_iw_p1_poisoned; // @[util.scala:506:17]
wire out_uop_iw_p2_poisoned; // @[util.scala:506:17]
wire out_uop_is_br; // @[util.scala:506:17]
wire out_uop_is_jalr; // @[util.scala:506:17]
wire out_uop_is_jal; // @[util.scala:506:17]
wire out_uop_is_sfb; // @[util.scala:506:17]
wire [3:0] _io_deq_bits_uop_br_mask_T_1; // @[util.scala:85:25]
wire [1:0] out_uop_br_tag; // @[util.scala:506:17]
wire [3:0] out_uop_ftq_idx; // @[util.scala:506:17]
wire out_uop_edge_inst; // @[util.scala:506:17]
wire [5:0] out_uop_pc_lob; // @[util.scala:506:17]
wire out_uop_taken; // @[util.scala:506:17]
wire [19:0] out_uop_imm_packed; // @[util.scala:506:17]
wire [11:0] out_uop_csr_addr; // @[util.scala:506:17]
wire [5:0] out_uop_rob_idx; // @[util.scala:506:17]
wire [3:0] out_uop_ldq_idx; // @[util.scala:506:17]
wire [3:0] out_uop_stq_idx; // @[util.scala:506:17]
wire [1:0] out_uop_rxq_idx; // @[util.scala:506:17]
wire [6:0] out_uop_pdst; // @[util.scala:506:17]
wire [6:0] out_uop_prs1; // @[util.scala:506:17]
wire [6:0] out_uop_prs2; // @[util.scala:506:17]
wire [6:0] out_uop_prs3; // @[util.scala:506:17]
wire [3:0] out_uop_ppred; // @[util.scala:506:17]
wire out_uop_prs1_busy; // @[util.scala:506:17]
wire out_uop_prs2_busy; // @[util.scala:506:17]
wire out_uop_prs3_busy; // @[util.scala:506:17]
wire out_uop_ppred_busy; // @[util.scala:506:17]
wire [6:0] out_uop_stale_pdst; // @[util.scala:506:17]
wire out_uop_exception; // @[util.scala:506:17]
wire [63:0] out_uop_exc_cause; // @[util.scala:506:17]
wire out_uop_bypassable; // @[util.scala:506:17]
wire [4:0] out_uop_mem_cmd; // @[util.scala:506:17]
wire [1:0] out_uop_mem_size; // @[util.scala:506:17]
wire out_uop_mem_signed; // @[util.scala:506:17]
wire out_uop_is_fence; // @[util.scala:506:17]
wire out_uop_is_fencei; // @[util.scala:506:17]
wire out_uop_is_amo; // @[util.scala:506:17]
wire out_uop_uses_ldq; // @[util.scala:506:17]
wire out_uop_uses_stq; // @[util.scala:506:17]
wire out_uop_is_sys_pc2epc; // @[util.scala:506:17]
wire out_uop_is_unique; // @[util.scala:506:17]
wire out_uop_flush_on_commit; // @[util.scala:506:17]
wire out_uop_ldst_is_rs1; // @[util.scala:506:17]
wire [5:0] out_uop_ldst; // @[util.scala:506:17]
wire [5:0] out_uop_lrs1; // @[util.scala:506:17]
wire [5:0] out_uop_lrs2; // @[util.scala:506:17]
wire [5:0] out_uop_lrs3; // @[util.scala:506:17]
wire out_uop_ldst_val; // @[util.scala:506:17]
wire [1:0] out_uop_dst_rtype; // @[util.scala:506:17]
wire [1:0] out_uop_lrs1_rtype; // @[util.scala:506:17]
wire [1:0] out_uop_lrs2_rtype; // @[util.scala:506:17]
wire out_uop_frs3_en; // @[util.scala:506:17]
wire out_uop_fp_val; // @[util.scala:506:17]
wire out_uop_fp_single; // @[util.scala:506:17]
wire out_uop_xcpt_pf_if; // @[util.scala:506:17]
wire out_uop_xcpt_ae_if; // @[util.scala:506:17]
wire out_uop_xcpt_ma_if; // @[util.scala:506:17]
wire out_uop_bp_debug_if; // @[util.scala:506:17]
wire out_uop_bp_xcpt_if; // @[util.scala:506:17]
wire [1:0] out_uop_debug_fsrc; // @[util.scala:506:17]
wire [1:0] out_uop_debug_tsrc; // @[util.scala:506:17]
wire [33:0] out_addr; // @[util.scala:506:17]
wire [63:0] out_data; // @[util.scala:506:17]
wire out_is_hella; // @[util.scala:506:17]
wire out_tag_match; // @[util.scala:506:17]
wire [1:0] out_old_meta_coh_state; // @[util.scala:506:17]
wire [21:0] out_old_meta_tag; // @[util.scala:506:17]
wire [1:0] out_way_en; // @[util.scala:506:17]
wire [4:0] out_sdq_id; // @[util.scala:506:17]
wire _io_empty_T_1; // @[util.scala:473:25]
wire io_enq_ready_0; // @[util.scala:448:7]
wire [3:0] io_deq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7]
wire [1:0] io_deq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7]
wire [2:0] io_deq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7]
wire [2:0] io_deq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7]
wire [4:0] io_deq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7]
wire io_deq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7]
wire [2:0] io_deq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7]
wire io_deq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7]
wire io_deq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7]
wire io_deq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7]
wire [6:0] io_deq_bits_uop_uopc_0; // @[util.scala:448:7]
wire [31:0] io_deq_bits_uop_inst_0; // @[util.scala:448:7]
wire [31:0] io_deq_bits_uop_debug_inst_0; // @[util.scala:448:7]
wire io_deq_bits_uop_is_rvc_0; // @[util.scala:448:7]
wire [33:0] io_deq_bits_uop_debug_pc_0; // @[util.scala:448:7]
wire [2:0] io_deq_bits_uop_iq_type_0; // @[util.scala:448:7]
wire [9:0] io_deq_bits_uop_fu_code_0; // @[util.scala:448:7]
wire [1:0] io_deq_bits_uop_iw_state_0; // @[util.scala:448:7]
wire io_deq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7]
wire io_deq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7]
wire io_deq_bits_uop_is_br_0; // @[util.scala:448:7]
wire io_deq_bits_uop_is_jalr_0; // @[util.scala:448:7]
wire io_deq_bits_uop_is_jal_0; // @[util.scala:448:7]
wire io_deq_bits_uop_is_sfb_0; // @[util.scala:448:7]
wire [3:0] io_deq_bits_uop_br_mask_0; // @[util.scala:448:7]
wire [1:0] io_deq_bits_uop_br_tag_0; // @[util.scala:448:7]
wire [3:0] io_deq_bits_uop_ftq_idx_0; // @[util.scala:448:7]
wire io_deq_bits_uop_edge_inst_0; // @[util.scala:448:7]
wire [5:0] io_deq_bits_uop_pc_lob_0; // @[util.scala:448:7]
wire io_deq_bits_uop_taken_0; // @[util.scala:448:7]
wire [19:0] io_deq_bits_uop_imm_packed_0; // @[util.scala:448:7]
wire [11:0] io_deq_bits_uop_csr_addr_0; // @[util.scala:448:7]
wire [5:0] io_deq_bits_uop_rob_idx_0; // @[util.scala:448:7]
wire [3:0] io_deq_bits_uop_ldq_idx_0; // @[util.scala:448:7]
wire [3:0] io_deq_bits_uop_stq_idx_0; // @[util.scala:448:7]
wire [1:0] io_deq_bits_uop_rxq_idx_0; // @[util.scala:448:7]
wire [6:0] io_deq_bits_uop_pdst_0; // @[util.scala:448:7]
wire [6:0] io_deq_bits_uop_prs1_0; // @[util.scala:448:7]
wire [6:0] io_deq_bits_uop_prs2_0; // @[util.scala:448:7]
wire [6:0] io_deq_bits_uop_prs3_0; // @[util.scala:448:7]
wire [3:0] io_deq_bits_uop_ppred_0; // @[util.scala:448:7]
wire io_deq_bits_uop_prs1_busy_0; // @[util.scala:448:7]
wire io_deq_bits_uop_prs2_busy_0; // @[util.scala:448:7]
wire io_deq_bits_uop_prs3_busy_0; // @[util.scala:448:7]
wire io_deq_bits_uop_ppred_busy_0; // @[util.scala:448:7]
wire [6:0] io_deq_bits_uop_stale_pdst_0; // @[util.scala:448:7]
wire io_deq_bits_uop_exception_0; // @[util.scala:448:7]
wire [63:0] io_deq_bits_uop_exc_cause_0; // @[util.scala:448:7]
wire io_deq_bits_uop_bypassable_0; // @[util.scala:448:7]
wire [4:0] io_deq_bits_uop_mem_cmd_0; // @[util.scala:448:7]
wire [1:0] io_deq_bits_uop_mem_size_0; // @[util.scala:448:7]
wire io_deq_bits_uop_mem_signed_0; // @[util.scala:448:7]
wire io_deq_bits_uop_is_fence_0; // @[util.scala:448:7]
wire io_deq_bits_uop_is_fencei_0; // @[util.scala:448:7]
wire io_deq_bits_uop_is_amo_0; // @[util.scala:448:7]
wire io_deq_bits_uop_uses_ldq_0; // @[util.scala:448:7]
wire io_deq_bits_uop_uses_stq_0; // @[util.scala:448:7]
wire io_deq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7]
wire io_deq_bits_uop_is_unique_0; // @[util.scala:448:7]
wire io_deq_bits_uop_flush_on_commit_0; // @[util.scala:448:7]
wire io_deq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7]
wire [5:0] io_deq_bits_uop_ldst_0; // @[util.scala:448:7]
wire [5:0] io_deq_bits_uop_lrs1_0; // @[util.scala:448:7]
wire [5:0] io_deq_bits_uop_lrs2_0; // @[util.scala:448:7]
wire [5:0] io_deq_bits_uop_lrs3_0; // @[util.scala:448:7]
wire io_deq_bits_uop_ldst_val_0; // @[util.scala:448:7]
wire [1:0] io_deq_bits_uop_dst_rtype_0; // @[util.scala:448:7]
wire [1:0] io_deq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7]
wire [1:0] io_deq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7]
wire io_deq_bits_uop_frs3_en_0; // @[util.scala:448:7]
wire io_deq_bits_uop_fp_val_0; // @[util.scala:448:7]
wire io_deq_bits_uop_fp_single_0; // @[util.scala:448:7]
wire io_deq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7]
wire io_deq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7]
wire io_deq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7]
wire io_deq_bits_uop_bp_debug_if_0; // @[util.scala:448:7]
wire io_deq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7]
wire [1:0] io_deq_bits_uop_debug_fsrc_0; // @[util.scala:448:7]
wire [1:0] io_deq_bits_uop_debug_tsrc_0; // @[util.scala:448:7]
wire [1:0] io_deq_bits_old_meta_coh_state_0; // @[util.scala:448:7]
wire [21:0] io_deq_bits_old_meta_tag_0; // @[util.scala:448:7]
wire [33:0] io_deq_bits_addr_0; // @[util.scala:448:7]
wire [63:0] io_deq_bits_data_0; // @[util.scala:448:7]
wire io_deq_bits_is_hella_0; // @[util.scala:448:7]
wire io_deq_bits_tag_match_0; // @[util.scala:448:7]
wire [1:0] io_deq_bits_way_en; // @[util.scala:448:7]
wire [4:0] io_deq_bits_sdq_id_0; // @[util.scala:448:7]
wire io_deq_valid_0; // @[util.scala:448:7]
wire io_empty_0; // @[util.scala:448:7]
wire [3:0] io_count; // @[util.scala:448:7]
assign out_addr = _ram_ext_R0_data[33:0]; // @[util.scala:464:20, :506:17]
assign out_data = _ram_ext_R0_data[97:34]; // @[util.scala:464:20, :506:17]
assign out_is_hella = _ram_ext_R0_data[98]; // @[util.scala:464:20, :506:17]
assign out_tag_match = _ram_ext_R0_data[99]; // @[util.scala:464:20, :506:17]
assign out_old_meta_coh_state = _ram_ext_R0_data[101:100]; // @[util.scala:464:20, :506:17]
assign out_old_meta_tag = _ram_ext_R0_data[123:102]; // @[util.scala:464:20, :506:17]
assign out_way_en = _ram_ext_R0_data[125:124]; // @[util.scala:464:20, :506:17]
assign out_sdq_id = _ram_ext_R0_data[130:126]; // @[util.scala:464:20, :506:17]
reg valids_0; // @[util.scala:465:24]
wire _valids_0_T_3 = valids_0; // @[util.scala:465:24, :481:29]
reg valids_1; // @[util.scala:465:24]
wire _valids_1_T_3 = valids_1; // @[util.scala:465:24, :481:29]
reg valids_2; // @[util.scala:465:24]
wire _valids_2_T_3 = valids_2; // @[util.scala:465:24, :481:29]
reg valids_3; // @[util.scala:465:24]
wire _valids_3_T_3 = valids_3; // @[util.scala:465:24, :481:29]
reg valids_4; // @[util.scala:465:24]
wire _valids_4_T_3 = valids_4; // @[util.scala:465:24, :481:29]
reg valids_5; // @[util.scala:465:24]
wire _valids_5_T_3 = valids_5; // @[util.scala:465:24, :481:29]
reg valids_6; // @[util.scala:465:24]
wire _valids_6_T_3 = valids_6; // @[util.scala:465:24, :481:29]
reg valids_7; // @[util.scala:465:24]
wire _valids_7_T_3 = valids_7; // @[util.scala:465:24, :481:29]
reg valids_8; // @[util.scala:465:24]
wire _valids_8_T_3 = valids_8; // @[util.scala:465:24, :481:29]
reg valids_9; // @[util.scala:465:24]
wire _valids_9_T_3 = valids_9; // @[util.scala:465:24, :481:29]
reg valids_10; // @[util.scala:465:24]
wire _valids_10_T_3 = valids_10; // @[util.scala:465:24, :481:29]
reg valids_11; // @[util.scala:465:24]
wire _valids_11_T_3 = valids_11; // @[util.scala:465:24, :481:29]
reg valids_12; // @[util.scala:465:24]
wire _valids_12_T_3 = valids_12; // @[util.scala:465:24, :481:29]
reg valids_13; // @[util.scala:465:24]
wire _valids_13_T_3 = valids_13; // @[util.scala:465:24, :481:29]
reg valids_14; // @[util.scala:465:24]
wire _valids_14_T_3 = valids_14; // @[util.scala:465:24, :481:29]
reg valids_15; // @[util.scala:465:24]
wire _valids_15_T_3 = valids_15; // @[util.scala:465:24, :481:29]
reg [6:0] uops_0_uopc; // @[util.scala:466:20]
reg [31:0] uops_0_inst; // @[util.scala:466:20]
reg [31:0] uops_0_debug_inst; // @[util.scala:466:20]
reg uops_0_is_rvc; // @[util.scala:466:20]
reg [33:0] uops_0_debug_pc; // @[util.scala:466:20]
reg [2:0] uops_0_iq_type; // @[util.scala:466:20]
reg [9:0] uops_0_fu_code; // @[util.scala:466:20]
reg [3:0] uops_0_ctrl_br_type; // @[util.scala:466:20]
reg [1:0] uops_0_ctrl_op1_sel; // @[util.scala:466:20]
reg [2:0] uops_0_ctrl_op2_sel; // @[util.scala:466:20]
reg [2:0] uops_0_ctrl_imm_sel; // @[util.scala:466:20]
reg [4:0] uops_0_ctrl_op_fcn; // @[util.scala:466:20]
reg uops_0_ctrl_fcn_dw; // @[util.scala:466:20]
reg [2:0] uops_0_ctrl_csr_cmd; // @[util.scala:466:20]
reg uops_0_ctrl_is_load; // @[util.scala:466:20]
reg uops_0_ctrl_is_sta; // @[util.scala:466:20]
reg uops_0_ctrl_is_std; // @[util.scala:466:20]
reg [1:0] uops_0_iw_state; // @[util.scala:466:20]
reg uops_0_iw_p1_poisoned; // @[util.scala:466:20]
reg uops_0_iw_p2_poisoned; // @[util.scala:466:20]
reg uops_0_is_br; // @[util.scala:466:20]
reg uops_0_is_jalr; // @[util.scala:466:20]
reg uops_0_is_jal; // @[util.scala:466:20]
reg uops_0_is_sfb; // @[util.scala:466:20]
reg [3:0] uops_0_br_mask; // @[util.scala:466:20]
wire [3:0] _uops_0_br_mask_T_1 = uops_0_br_mask; // @[util.scala:89:21, :466:20]
reg [1:0] uops_0_br_tag; // @[util.scala:466:20]
reg [3:0] uops_0_ftq_idx; // @[util.scala:466:20]
reg uops_0_edge_inst; // @[util.scala:466:20]
reg [5:0] uops_0_pc_lob; // @[util.scala:466:20]
reg uops_0_taken; // @[util.scala:466:20]
reg [19:0] uops_0_imm_packed; // @[util.scala:466:20]
reg [11:0] uops_0_csr_addr; // @[util.scala:466:20]
reg [5:0] uops_0_rob_idx; // @[util.scala:466:20]
reg [3:0] uops_0_ldq_idx; // @[util.scala:466:20]
reg [3:0] uops_0_stq_idx; // @[util.scala:466:20]
reg [1:0] uops_0_rxq_idx; // @[util.scala:466:20]
reg [6:0] uops_0_pdst; // @[util.scala:466:20]
reg [6:0] uops_0_prs1; // @[util.scala:466:20]
reg [6:0] uops_0_prs2; // @[util.scala:466:20]
reg [6:0] uops_0_prs3; // @[util.scala:466:20]
reg [3:0] uops_0_ppred; // @[util.scala:466:20]
reg uops_0_prs1_busy; // @[util.scala:466:20]
reg uops_0_prs2_busy; // @[util.scala:466:20]
reg uops_0_prs3_busy; // @[util.scala:466:20]
reg uops_0_ppred_busy; // @[util.scala:466:20]
reg [6:0] uops_0_stale_pdst; // @[util.scala:466:20]
reg uops_0_exception; // @[util.scala:466:20]
reg [63:0] uops_0_exc_cause; // @[util.scala:466:20]
reg uops_0_bypassable; // @[util.scala:466:20]
reg [4:0] uops_0_mem_cmd; // @[util.scala:466:20]
reg [1:0] uops_0_mem_size; // @[util.scala:466:20]
reg uops_0_mem_signed; // @[util.scala:466:20]
reg uops_0_is_fence; // @[util.scala:466:20]
reg uops_0_is_fencei; // @[util.scala:466:20]
reg uops_0_is_amo; // @[util.scala:466:20]
reg uops_0_uses_ldq; // @[util.scala:466:20]
reg uops_0_uses_stq; // @[util.scala:466:20]
reg uops_0_is_sys_pc2epc; // @[util.scala:466:20]
reg uops_0_is_unique; // @[util.scala:466:20]
reg uops_0_flush_on_commit; // @[util.scala:466:20]
reg uops_0_ldst_is_rs1; // @[util.scala:466:20]
reg [5:0] uops_0_ldst; // @[util.scala:466:20]
reg [5:0] uops_0_lrs1; // @[util.scala:466:20]
reg [5:0] uops_0_lrs2; // @[util.scala:466:20]
reg [5:0] uops_0_lrs3; // @[util.scala:466:20]
reg uops_0_ldst_val; // @[util.scala:466:20]
reg [1:0] uops_0_dst_rtype; // @[util.scala:466:20]
reg [1:0] uops_0_lrs1_rtype; // @[util.scala:466:20]
reg [1:0] uops_0_lrs2_rtype; // @[util.scala:466:20]
reg uops_0_frs3_en; // @[util.scala:466:20]
reg uops_0_fp_val; // @[util.scala:466:20]
reg uops_0_fp_single; // @[util.scala:466:20]
reg uops_0_xcpt_pf_if; // @[util.scala:466:20]
reg uops_0_xcpt_ae_if; // @[util.scala:466:20]
reg uops_0_xcpt_ma_if; // @[util.scala:466:20]
reg uops_0_bp_debug_if; // @[util.scala:466:20]
reg uops_0_bp_xcpt_if; // @[util.scala:466:20]
reg [1:0] uops_0_debug_fsrc; // @[util.scala:466:20]
reg [1:0] uops_0_debug_tsrc; // @[util.scala:466:20]
reg [6:0] uops_1_uopc; // @[util.scala:466:20]
reg [31:0] uops_1_inst; // @[util.scala:466:20]
reg [31:0] uops_1_debug_inst; // @[util.scala:466:20]
reg uops_1_is_rvc; // @[util.scala:466:20]
reg [33:0] uops_1_debug_pc; // @[util.scala:466:20]
reg [2:0] uops_1_iq_type; // @[util.scala:466:20]
reg [9:0] uops_1_fu_code; // @[util.scala:466:20]
reg [3:0] uops_1_ctrl_br_type; // @[util.scala:466:20]
reg [1:0] uops_1_ctrl_op1_sel; // @[util.scala:466:20]
reg [2:0] uops_1_ctrl_op2_sel; // @[util.scala:466:20]
reg [2:0] uops_1_ctrl_imm_sel; // @[util.scala:466:20]
reg [4:0] uops_1_ctrl_op_fcn; // @[util.scala:466:20]
reg uops_1_ctrl_fcn_dw; // @[util.scala:466:20]
reg [2:0] uops_1_ctrl_csr_cmd; // @[util.scala:466:20]
reg uops_1_ctrl_is_load; // @[util.scala:466:20]
reg uops_1_ctrl_is_sta; // @[util.scala:466:20]
reg uops_1_ctrl_is_std; // @[util.scala:466:20]
reg [1:0] uops_1_iw_state; // @[util.scala:466:20]
reg uops_1_iw_p1_poisoned; // @[util.scala:466:20]
reg uops_1_iw_p2_poisoned; // @[util.scala:466:20]
reg uops_1_is_br; // @[util.scala:466:20]
reg uops_1_is_jalr; // @[util.scala:466:20]
reg uops_1_is_jal; // @[util.scala:466:20]
reg uops_1_is_sfb; // @[util.scala:466:20]
reg [3:0] uops_1_br_mask; // @[util.scala:466:20]
wire [3:0] _uops_1_br_mask_T_1 = uops_1_br_mask; // @[util.scala:89:21, :466:20]
reg [1:0] uops_1_br_tag; // @[util.scala:466:20]
reg [3:0] uops_1_ftq_idx; // @[util.scala:466:20]
reg uops_1_edge_inst; // @[util.scala:466:20]
reg [5:0] uops_1_pc_lob; // @[util.scala:466:20]
reg uops_1_taken; // @[util.scala:466:20]
reg [19:0] uops_1_imm_packed; // @[util.scala:466:20]
reg [11:0] uops_1_csr_addr; // @[util.scala:466:20]
reg [5:0] uops_1_rob_idx; // @[util.scala:466:20]
reg [3:0] uops_1_ldq_idx; // @[util.scala:466:20]
reg [3:0] uops_1_stq_idx; // @[util.scala:466:20]
reg [1:0] uops_1_rxq_idx; // @[util.scala:466:20]
reg [6:0] uops_1_pdst; // @[util.scala:466:20]
reg [6:0] uops_1_prs1; // @[util.scala:466:20]
reg [6:0] uops_1_prs2; // @[util.scala:466:20]
reg [6:0] uops_1_prs3; // @[util.scala:466:20]
reg [3:0] uops_1_ppred; // @[util.scala:466:20]
reg uops_1_prs1_busy; // @[util.scala:466:20]
reg uops_1_prs2_busy; // @[util.scala:466:20]
reg uops_1_prs3_busy; // @[util.scala:466:20]
reg uops_1_ppred_busy; // @[util.scala:466:20]
reg [6:0] uops_1_stale_pdst; // @[util.scala:466:20]
reg uops_1_exception; // @[util.scala:466:20]
reg [63:0] uops_1_exc_cause; // @[util.scala:466:20]
reg uops_1_bypassable; // @[util.scala:466:20]
reg [4:0] uops_1_mem_cmd; // @[util.scala:466:20]
reg [1:0] uops_1_mem_size; // @[util.scala:466:20]
reg uops_1_mem_signed; // @[util.scala:466:20]
reg uops_1_is_fence; // @[util.scala:466:20]
reg uops_1_is_fencei; // @[util.scala:466:20]
reg uops_1_is_amo; // @[util.scala:466:20]
reg uops_1_uses_ldq; // @[util.scala:466:20]
reg uops_1_uses_stq; // @[util.scala:466:20]
reg uops_1_is_sys_pc2epc; // @[util.scala:466:20]
reg uops_1_is_unique; // @[util.scala:466:20]
reg uops_1_flush_on_commit; // @[util.scala:466:20]
reg uops_1_ldst_is_rs1; // @[util.scala:466:20]
reg [5:0] uops_1_ldst; // @[util.scala:466:20]
reg [5:0] uops_1_lrs1; // @[util.scala:466:20]
reg [5:0] uops_1_lrs2; // @[util.scala:466:20]
reg [5:0] uops_1_lrs3; // @[util.scala:466:20]
reg uops_1_ldst_val; // @[util.scala:466:20]
reg [1:0] uops_1_dst_rtype; // @[util.scala:466:20]
reg [1:0] uops_1_lrs1_rtype; // @[util.scala:466:20]
reg [1:0] uops_1_lrs2_rtype; // @[util.scala:466:20]
reg uops_1_frs3_en; // @[util.scala:466:20]
reg uops_1_fp_val; // @[util.scala:466:20]
reg uops_1_fp_single; // @[util.scala:466:20]
reg uops_1_xcpt_pf_if; // @[util.scala:466:20]
reg uops_1_xcpt_ae_if; // @[util.scala:466:20]
reg uops_1_xcpt_ma_if; // @[util.scala:466:20]
reg uops_1_bp_debug_if; // @[util.scala:466:20]
reg uops_1_bp_xcpt_if; // @[util.scala:466:20]
reg [1:0] uops_1_debug_fsrc; // @[util.scala:466:20]
reg [1:0] uops_1_debug_tsrc; // @[util.scala:466:20]
reg [6:0] uops_2_uopc; // @[util.scala:466:20]
reg [31:0] uops_2_inst; // @[util.scala:466:20]
reg [31:0] uops_2_debug_inst; // @[util.scala:466:20]
reg uops_2_is_rvc; // @[util.scala:466:20]
reg [33:0] uops_2_debug_pc; // @[util.scala:466:20]
reg [2:0] uops_2_iq_type; // @[util.scala:466:20]
reg [9:0] uops_2_fu_code; // @[util.scala:466:20]
reg [3:0] uops_2_ctrl_br_type; // @[util.scala:466:20]
reg [1:0] uops_2_ctrl_op1_sel; // @[util.scala:466:20]
reg [2:0] uops_2_ctrl_op2_sel; // @[util.scala:466:20]
reg [2:0] uops_2_ctrl_imm_sel; // @[util.scala:466:20]
reg [4:0] uops_2_ctrl_op_fcn; // @[util.scala:466:20]
reg uops_2_ctrl_fcn_dw; // @[util.scala:466:20]
reg [2:0] uops_2_ctrl_csr_cmd; // @[util.scala:466:20]
reg uops_2_ctrl_is_load; // @[util.scala:466:20]
reg uops_2_ctrl_is_sta; // @[util.scala:466:20]
reg uops_2_ctrl_is_std; // @[util.scala:466:20]
reg [1:0] uops_2_iw_state; // @[util.scala:466:20]
reg uops_2_iw_p1_poisoned; // @[util.scala:466:20]
reg uops_2_iw_p2_poisoned; // @[util.scala:466:20]
reg uops_2_is_br; // @[util.scala:466:20]
reg uops_2_is_jalr; // @[util.scala:466:20]
reg uops_2_is_jal; // @[util.scala:466:20]
reg uops_2_is_sfb; // @[util.scala:466:20]
reg [3:0] uops_2_br_mask; // @[util.scala:466:20]
wire [3:0] _uops_2_br_mask_T_1 = uops_2_br_mask; // @[util.scala:89:21, :466:20]
reg [1:0] uops_2_br_tag; // @[util.scala:466:20]
reg [3:0] uops_2_ftq_idx; // @[util.scala:466:20]
reg uops_2_edge_inst; // @[util.scala:466:20]
reg [5:0] uops_2_pc_lob; // @[util.scala:466:20]
reg uops_2_taken; // @[util.scala:466:20]
reg [19:0] uops_2_imm_packed; // @[util.scala:466:20]
reg [11:0] uops_2_csr_addr; // @[util.scala:466:20]
reg [5:0] uops_2_rob_idx; // @[util.scala:466:20]
reg [3:0] uops_2_ldq_idx; // @[util.scala:466:20]
reg [3:0] uops_2_stq_idx; // @[util.scala:466:20]
reg [1:0] uops_2_rxq_idx; // @[util.scala:466:20]
reg [6:0] uops_2_pdst; // @[util.scala:466:20]
reg [6:0] uops_2_prs1; // @[util.scala:466:20]
reg [6:0] uops_2_prs2; // @[util.scala:466:20]
reg [6:0] uops_2_prs3; // @[util.scala:466:20]
reg [3:0] uops_2_ppred; // @[util.scala:466:20]
reg uops_2_prs1_busy; // @[util.scala:466:20]
reg uops_2_prs2_busy; // @[util.scala:466:20]
reg uops_2_prs3_busy; // @[util.scala:466:20]
reg uops_2_ppred_busy; // @[util.scala:466:20]
reg [6:0] uops_2_stale_pdst; // @[util.scala:466:20]
reg uops_2_exception; // @[util.scala:466:20]
reg [63:0] uops_2_exc_cause; // @[util.scala:466:20]
reg uops_2_bypassable; // @[util.scala:466:20]
reg [4:0] uops_2_mem_cmd; // @[util.scala:466:20]
reg [1:0] uops_2_mem_size; // @[util.scala:466:20]
reg uops_2_mem_signed; // @[util.scala:466:20]
reg uops_2_is_fence; // @[util.scala:466:20]
reg uops_2_is_fencei; // @[util.scala:466:20]
reg uops_2_is_amo; // @[util.scala:466:20]
reg uops_2_uses_ldq; // @[util.scala:466:20]
reg uops_2_uses_stq; // @[util.scala:466:20]
reg uops_2_is_sys_pc2epc; // @[util.scala:466:20]
reg uops_2_is_unique; // @[util.scala:466:20]
reg uops_2_flush_on_commit; // @[util.scala:466:20]
reg uops_2_ldst_is_rs1; // @[util.scala:466:20]
reg [5:0] uops_2_ldst; // @[util.scala:466:20]
reg [5:0] uops_2_lrs1; // @[util.scala:466:20]
reg [5:0] uops_2_lrs2; // @[util.scala:466:20]
reg [5:0] uops_2_lrs3; // @[util.scala:466:20]
reg uops_2_ldst_val; // @[util.scala:466:20]
reg [1:0] uops_2_dst_rtype; // @[util.scala:466:20]
reg [1:0] uops_2_lrs1_rtype; // @[util.scala:466:20]
reg [1:0] uops_2_lrs2_rtype; // @[util.scala:466:20]
reg uops_2_frs3_en; // @[util.scala:466:20]
reg uops_2_fp_val; // @[util.scala:466:20]
reg uops_2_fp_single; // @[util.scala:466:20]
reg uops_2_xcpt_pf_if; // @[util.scala:466:20]
reg uops_2_xcpt_ae_if; // @[util.scala:466:20]
reg uops_2_xcpt_ma_if; // @[util.scala:466:20]
reg uops_2_bp_debug_if; // @[util.scala:466:20]
reg uops_2_bp_xcpt_if; // @[util.scala:466:20]
reg [1:0] uops_2_debug_fsrc; // @[util.scala:466:20]
reg [1:0] uops_2_debug_tsrc; // @[util.scala:466:20]
reg [6:0] uops_3_uopc; // @[util.scala:466:20]
reg [31:0] uops_3_inst; // @[util.scala:466:20]
reg [31:0] uops_3_debug_inst; // @[util.scala:466:20]
reg uops_3_is_rvc; // @[util.scala:466:20]
reg [33:0] uops_3_debug_pc; // @[util.scala:466:20]
reg [2:0] uops_3_iq_type; // @[util.scala:466:20]
reg [9:0] uops_3_fu_code; // @[util.scala:466:20]
reg [3:0] uops_3_ctrl_br_type; // @[util.scala:466:20]
reg [1:0] uops_3_ctrl_op1_sel; // @[util.scala:466:20]
reg [2:0] uops_3_ctrl_op2_sel; // @[util.scala:466:20]
reg [2:0] uops_3_ctrl_imm_sel; // @[util.scala:466:20]
reg [4:0] uops_3_ctrl_op_fcn; // @[util.scala:466:20]
reg uops_3_ctrl_fcn_dw; // @[util.scala:466:20]
reg [2:0] uops_3_ctrl_csr_cmd; // @[util.scala:466:20]
reg uops_3_ctrl_is_load; // @[util.scala:466:20]
reg uops_3_ctrl_is_sta; // @[util.scala:466:20]
reg uops_3_ctrl_is_std; // @[util.scala:466:20]
reg [1:0] uops_3_iw_state; // @[util.scala:466:20]
reg uops_3_iw_p1_poisoned; // @[util.scala:466:20]
reg uops_3_iw_p2_poisoned; // @[util.scala:466:20]
reg uops_3_is_br; // @[util.scala:466:20]
reg uops_3_is_jalr; // @[util.scala:466:20]
reg uops_3_is_jal; // @[util.scala:466:20]
reg uops_3_is_sfb; // @[util.scala:466:20]
reg [3:0] uops_3_br_mask; // @[util.scala:466:20]
wire [3:0] _uops_3_br_mask_T_1 = uops_3_br_mask; // @[util.scala:89:21, :466:20]
reg [1:0] uops_3_br_tag; // @[util.scala:466:20]
reg [3:0] uops_3_ftq_idx; // @[util.scala:466:20]
reg uops_3_edge_inst; // @[util.scala:466:20]
reg [5:0] uops_3_pc_lob; // @[util.scala:466:20]
reg uops_3_taken; // @[util.scala:466:20]
reg [19:0] uops_3_imm_packed; // @[util.scala:466:20]
reg [11:0] uops_3_csr_addr; // @[util.scala:466:20]
reg [5:0] uops_3_rob_idx; // @[util.scala:466:20]
reg [3:0] uops_3_ldq_idx; // @[util.scala:466:20]
reg [3:0] uops_3_stq_idx; // @[util.scala:466:20]
reg [1:0] uops_3_rxq_idx; // @[util.scala:466:20]
reg [6:0] uops_3_pdst; // @[util.scala:466:20]
reg [6:0] uops_3_prs1; // @[util.scala:466:20]
reg [6:0] uops_3_prs2; // @[util.scala:466:20]
reg [6:0] uops_3_prs3; // @[util.scala:466:20]
reg [3:0] uops_3_ppred; // @[util.scala:466:20]
reg uops_3_prs1_busy; // @[util.scala:466:20]
reg uops_3_prs2_busy; // @[util.scala:466:20]
reg uops_3_prs3_busy; // @[util.scala:466:20]
reg uops_3_ppred_busy; // @[util.scala:466:20]
reg [6:0] uops_3_stale_pdst; // @[util.scala:466:20]
reg uops_3_exception; // @[util.scala:466:20]
reg [63:0] uops_3_exc_cause; // @[util.scala:466:20]
reg uops_3_bypassable; // @[util.scala:466:20]
reg [4:0] uops_3_mem_cmd; // @[util.scala:466:20]
reg [1:0] uops_3_mem_size; // @[util.scala:466:20]
reg uops_3_mem_signed; // @[util.scala:466:20]
reg uops_3_is_fence; // @[util.scala:466:20]
reg uops_3_is_fencei; // @[util.scala:466:20]
reg uops_3_is_amo; // @[util.scala:466:20]
reg uops_3_uses_ldq; // @[util.scala:466:20]
reg uops_3_uses_stq; // @[util.scala:466:20]
reg uops_3_is_sys_pc2epc; // @[util.scala:466:20]
reg uops_3_is_unique; // @[util.scala:466:20]
reg uops_3_flush_on_commit; // @[util.scala:466:20]
reg uops_3_ldst_is_rs1; // @[util.scala:466:20]
reg [5:0] uops_3_ldst; // @[util.scala:466:20]
reg [5:0] uops_3_lrs1; // @[util.scala:466:20]
reg [5:0] uops_3_lrs2; // @[util.scala:466:20]
reg [5:0] uops_3_lrs3; // @[util.scala:466:20]
reg uops_3_ldst_val; // @[util.scala:466:20]
reg [1:0] uops_3_dst_rtype; // @[util.scala:466:20]
reg [1:0] uops_3_lrs1_rtype; // @[util.scala:466:20]
reg [1:0] uops_3_lrs2_rtype; // @[util.scala:466:20]
reg uops_3_frs3_en; // @[util.scala:466:20]
reg uops_3_fp_val; // @[util.scala:466:20]
reg uops_3_fp_single; // @[util.scala:466:20]
reg uops_3_xcpt_pf_if; // @[util.scala:466:20]
reg uops_3_xcpt_ae_if; // @[util.scala:466:20]
reg uops_3_xcpt_ma_if; // @[util.scala:466:20]
reg uops_3_bp_debug_if; // @[util.scala:466:20]
reg uops_3_bp_xcpt_if; // @[util.scala:466:20]
reg [1:0] uops_3_debug_fsrc; // @[util.scala:466:20]
reg [1:0] uops_3_debug_tsrc; // @[util.scala:466:20]
reg [6:0] uops_4_uopc; // @[util.scala:466:20]
reg [31:0] uops_4_inst; // @[util.scala:466:20]
reg [31:0] uops_4_debug_inst; // @[util.scala:466:20]
reg uops_4_is_rvc; // @[util.scala:466:20]
reg [33:0] uops_4_debug_pc; // @[util.scala:466:20]
reg [2:0] uops_4_iq_type; // @[util.scala:466:20]
reg [9:0] uops_4_fu_code; // @[util.scala:466:20]
reg [3:0] uops_4_ctrl_br_type; // @[util.scala:466:20]
reg [1:0] uops_4_ctrl_op1_sel; // @[util.scala:466:20]
reg [2:0] uops_4_ctrl_op2_sel; // @[util.scala:466:20]
reg [2:0] uops_4_ctrl_imm_sel; // @[util.scala:466:20]
reg [4:0] uops_4_ctrl_op_fcn; // @[util.scala:466:20]
reg uops_4_ctrl_fcn_dw; // @[util.scala:466:20]
reg [2:0] uops_4_ctrl_csr_cmd; // @[util.scala:466:20]
reg uops_4_ctrl_is_load; // @[util.scala:466:20]
reg uops_4_ctrl_is_sta; // @[util.scala:466:20]
reg uops_4_ctrl_is_std; // @[util.scala:466:20]
reg [1:0] uops_4_iw_state; // @[util.scala:466:20]
reg uops_4_iw_p1_poisoned; // @[util.scala:466:20]
reg uops_4_iw_p2_poisoned; // @[util.scala:466:20]
reg uops_4_is_br; // @[util.scala:466:20]
reg uops_4_is_jalr; // @[util.scala:466:20]
reg uops_4_is_jal; // @[util.scala:466:20]
reg uops_4_is_sfb; // @[util.scala:466:20]
reg [3:0] uops_4_br_mask; // @[util.scala:466:20]
wire [3:0] _uops_4_br_mask_T_1 = uops_4_br_mask; // @[util.scala:89:21, :466:20]
reg [1:0] uops_4_br_tag; // @[util.scala:466:20]
reg [3:0] uops_4_ftq_idx; // @[util.scala:466:20]
reg uops_4_edge_inst; // @[util.scala:466:20]
reg [5:0] uops_4_pc_lob; // @[util.scala:466:20]
reg uops_4_taken; // @[util.scala:466:20]
reg [19:0] uops_4_imm_packed; // @[util.scala:466:20]
reg [11:0] uops_4_csr_addr; // @[util.scala:466:20]
reg [5:0] uops_4_rob_idx; // @[util.scala:466:20]
reg [3:0] uops_4_ldq_idx; // @[util.scala:466:20]
reg [3:0] uops_4_stq_idx; // @[util.scala:466:20]
reg [1:0] uops_4_rxq_idx; // @[util.scala:466:20]
reg [6:0] uops_4_pdst; // @[util.scala:466:20]
reg [6:0] uops_4_prs1; // @[util.scala:466:20]
reg [6:0] uops_4_prs2; // @[util.scala:466:20]
reg [6:0] uops_4_prs3; // @[util.scala:466:20]
reg [3:0] uops_4_ppred; // @[util.scala:466:20]
reg uops_4_prs1_busy; // @[util.scala:466:20]
reg uops_4_prs2_busy; // @[util.scala:466:20]
reg uops_4_prs3_busy; // @[util.scala:466:20]
reg uops_4_ppred_busy; // @[util.scala:466:20]
reg [6:0] uops_4_stale_pdst; // @[util.scala:466:20]
reg uops_4_exception; // @[util.scala:466:20]
reg [63:0] uops_4_exc_cause; // @[util.scala:466:20]
reg uops_4_bypassable; // @[util.scala:466:20]
reg [4:0] uops_4_mem_cmd; // @[util.scala:466:20]
reg [1:0] uops_4_mem_size; // @[util.scala:466:20]
reg uops_4_mem_signed; // @[util.scala:466:20]
reg uops_4_is_fence; // @[util.scala:466:20]
reg uops_4_is_fencei; // @[util.scala:466:20]
reg uops_4_is_amo; // @[util.scala:466:20]
reg uops_4_uses_ldq; // @[util.scala:466:20]
reg uops_4_uses_stq; // @[util.scala:466:20]
reg uops_4_is_sys_pc2epc; // @[util.scala:466:20]
reg uops_4_is_unique; // @[util.scala:466:20]
reg uops_4_flush_on_commit; // @[util.scala:466:20]
reg uops_4_ldst_is_rs1; // @[util.scala:466:20]
reg [5:0] uops_4_ldst; // @[util.scala:466:20]
reg [5:0] uops_4_lrs1; // @[util.scala:466:20]
reg [5:0] uops_4_lrs2; // @[util.scala:466:20]
reg [5:0] uops_4_lrs3; // @[util.scala:466:20]
reg uops_4_ldst_val; // @[util.scala:466:20]
reg [1:0] uops_4_dst_rtype; // @[util.scala:466:20]
reg [1:0] uops_4_lrs1_rtype; // @[util.scala:466:20]
reg [1:0] uops_4_lrs2_rtype; // @[util.scala:466:20]
reg uops_4_frs3_en; // @[util.scala:466:20]
reg uops_4_fp_val; // @[util.scala:466:20]
reg uops_4_fp_single; // @[util.scala:466:20]
reg uops_4_xcpt_pf_if; // @[util.scala:466:20]
reg uops_4_xcpt_ae_if; // @[util.scala:466:20]
reg uops_4_xcpt_ma_if; // @[util.scala:466:20]
reg uops_4_bp_debug_if; // @[util.scala:466:20]
reg uops_4_bp_xcpt_if; // @[util.scala:466:20]
reg [1:0] uops_4_debug_fsrc; // @[util.scala:466:20]
reg [1:0] uops_4_debug_tsrc; // @[util.scala:466:20]
reg [6:0] uops_5_uopc; // @[util.scala:466:20]
reg [31:0] uops_5_inst; // @[util.scala:466:20]
reg [31:0] uops_5_debug_inst; // @[util.scala:466:20]
reg uops_5_is_rvc; // @[util.scala:466:20]
reg [33:0] uops_5_debug_pc; // @[util.scala:466:20]
reg [2:0] uops_5_iq_type; // @[util.scala:466:20]
reg [9:0] uops_5_fu_code; // @[util.scala:466:20]
reg [3:0] uops_5_ctrl_br_type; // @[util.scala:466:20]
reg [1:0] uops_5_ctrl_op1_sel; // @[util.scala:466:20]
reg [2:0] uops_5_ctrl_op2_sel; // @[util.scala:466:20]
reg [2:0] uops_5_ctrl_imm_sel; // @[util.scala:466:20]
reg [4:0] uops_5_ctrl_op_fcn; // @[util.scala:466:20]
reg uops_5_ctrl_fcn_dw; // @[util.scala:466:20]
reg [2:0] uops_5_ctrl_csr_cmd; // @[util.scala:466:20]
reg uops_5_ctrl_is_load; // @[util.scala:466:20]
reg uops_5_ctrl_is_sta; // @[util.scala:466:20]
reg uops_5_ctrl_is_std; // @[util.scala:466:20]
reg [1:0] uops_5_iw_state; // @[util.scala:466:20]
reg uops_5_iw_p1_poisoned; // @[util.scala:466:20]
reg uops_5_iw_p2_poisoned; // @[util.scala:466:20]
reg uops_5_is_br; // @[util.scala:466:20]
reg uops_5_is_jalr; // @[util.scala:466:20]
reg uops_5_is_jal; // @[util.scala:466:20]
reg uops_5_is_sfb; // @[util.scala:466:20]
reg [3:0] uops_5_br_mask; // @[util.scala:466:20]
wire [3:0] _uops_5_br_mask_T_1 = uops_5_br_mask; // @[util.scala:89:21, :466:20]
reg [1:0] uops_5_br_tag; // @[util.scala:466:20]
reg [3:0] uops_5_ftq_idx; // @[util.scala:466:20]
reg uops_5_edge_inst; // @[util.scala:466:20]
reg [5:0] uops_5_pc_lob; // @[util.scala:466:20]
reg uops_5_taken; // @[util.scala:466:20]
reg [19:0] uops_5_imm_packed; // @[util.scala:466:20]
reg [11:0] uops_5_csr_addr; // @[util.scala:466:20]
reg [5:0] uops_5_rob_idx; // @[util.scala:466:20]
reg [3:0] uops_5_ldq_idx; // @[util.scala:466:20]
reg [3:0] uops_5_stq_idx; // @[util.scala:466:20]
reg [1:0] uops_5_rxq_idx; // @[util.scala:466:20]
reg [6:0] uops_5_pdst; // @[util.scala:466:20]
reg [6:0] uops_5_prs1; // @[util.scala:466:20]
reg [6:0] uops_5_prs2; // @[util.scala:466:20]
reg [6:0] uops_5_prs3; // @[util.scala:466:20]
reg [3:0] uops_5_ppred; // @[util.scala:466:20]
reg uops_5_prs1_busy; // @[util.scala:466:20]
reg uops_5_prs2_busy; // @[util.scala:466:20]
reg uops_5_prs3_busy; // @[util.scala:466:20]
reg uops_5_ppred_busy; // @[util.scala:466:20]
reg [6:0] uops_5_stale_pdst; // @[util.scala:466:20]
reg uops_5_exception; // @[util.scala:466:20]
reg [63:0] uops_5_exc_cause; // @[util.scala:466:20]
reg uops_5_bypassable; // @[util.scala:466:20]
reg [4:0] uops_5_mem_cmd; // @[util.scala:466:20]
reg [1:0] uops_5_mem_size; // @[util.scala:466:20]
reg uops_5_mem_signed; // @[util.scala:466:20]
reg uops_5_is_fence; // @[util.scala:466:20]
reg uops_5_is_fencei; // @[util.scala:466:20]
reg uops_5_is_amo; // @[util.scala:466:20]
reg uops_5_uses_ldq; // @[util.scala:466:20]
reg uops_5_uses_stq; // @[util.scala:466:20]
reg uops_5_is_sys_pc2epc; // @[util.scala:466:20]
reg uops_5_is_unique; // @[util.scala:466:20]
reg uops_5_flush_on_commit; // @[util.scala:466:20]
reg uops_5_ldst_is_rs1; // @[util.scala:466:20]
reg [5:0] uops_5_ldst; // @[util.scala:466:20]
reg [5:0] uops_5_lrs1; // @[util.scala:466:20]
reg [5:0] uops_5_lrs2; // @[util.scala:466:20]
reg [5:0] uops_5_lrs3; // @[util.scala:466:20]
reg uops_5_ldst_val; // @[util.scala:466:20]
reg [1:0] uops_5_dst_rtype; // @[util.scala:466:20]
reg [1:0] uops_5_lrs1_rtype; // @[util.scala:466:20]
reg [1:0] uops_5_lrs2_rtype; // @[util.scala:466:20]
reg uops_5_frs3_en; // @[util.scala:466:20]
reg uops_5_fp_val; // @[util.scala:466:20]
reg uops_5_fp_single; // @[util.scala:466:20]
reg uops_5_xcpt_pf_if; // @[util.scala:466:20]
reg uops_5_xcpt_ae_if; // @[util.scala:466:20]
reg uops_5_xcpt_ma_if; // @[util.scala:466:20]
reg uops_5_bp_debug_if; // @[util.scala:466:20]
reg uops_5_bp_xcpt_if; // @[util.scala:466:20]
reg [1:0] uops_5_debug_fsrc; // @[util.scala:466:20]
reg [1:0] uops_5_debug_tsrc; // @[util.scala:466:20]
reg [6:0] uops_6_uopc; // @[util.scala:466:20]
reg [31:0] uops_6_inst; // @[util.scala:466:20]
reg [31:0] uops_6_debug_inst; // @[util.scala:466:20]
reg uops_6_is_rvc; // @[util.scala:466:20]
reg [33:0] uops_6_debug_pc; // @[util.scala:466:20]
reg [2:0] uops_6_iq_type; // @[util.scala:466:20]
reg [9:0] uops_6_fu_code; // @[util.scala:466:20]
reg [3:0] uops_6_ctrl_br_type; // @[util.scala:466:20]
reg [1:0] uops_6_ctrl_op1_sel; // @[util.scala:466:20]
reg [2:0] uops_6_ctrl_op2_sel; // @[util.scala:466:20]
reg [2:0] uops_6_ctrl_imm_sel; // @[util.scala:466:20]
reg [4:0] uops_6_ctrl_op_fcn; // @[util.scala:466:20]
reg uops_6_ctrl_fcn_dw; // @[util.scala:466:20]
reg [2:0] uops_6_ctrl_csr_cmd; // @[util.scala:466:20]
reg uops_6_ctrl_is_load; // @[util.scala:466:20]
reg uops_6_ctrl_is_sta; // @[util.scala:466:20]
reg uops_6_ctrl_is_std; // @[util.scala:466:20]
reg [1:0] uops_6_iw_state; // @[util.scala:466:20]
reg uops_6_iw_p1_poisoned; // @[util.scala:466:20]
reg uops_6_iw_p2_poisoned; // @[util.scala:466:20]
reg uops_6_is_br; // @[util.scala:466:20]
reg uops_6_is_jalr; // @[util.scala:466:20]
reg uops_6_is_jal; // @[util.scala:466:20]
reg uops_6_is_sfb; // @[util.scala:466:20]
reg [3:0] uops_6_br_mask; // @[util.scala:466:20]
wire [3:0] _uops_6_br_mask_T_1 = uops_6_br_mask; // @[util.scala:89:21, :466:20]
reg [1:0] uops_6_br_tag; // @[util.scala:466:20]
reg [3:0] uops_6_ftq_idx; // @[util.scala:466:20]
reg uops_6_edge_inst; // @[util.scala:466:20]
reg [5:0] uops_6_pc_lob; // @[util.scala:466:20]
reg uops_6_taken; // @[util.scala:466:20]
reg [19:0] uops_6_imm_packed; // @[util.scala:466:20]
reg [11:0] uops_6_csr_addr; // @[util.scala:466:20]
reg [5:0] uops_6_rob_idx; // @[util.scala:466:20]
reg [3:0] uops_6_ldq_idx; // @[util.scala:466:20]
reg [3:0] uops_6_stq_idx; // @[util.scala:466:20]
reg [1:0] uops_6_rxq_idx; // @[util.scala:466:20]
reg [6:0] uops_6_pdst; // @[util.scala:466:20]
reg [6:0] uops_6_prs1; // @[util.scala:466:20]
reg [6:0] uops_6_prs2; // @[util.scala:466:20]
reg [6:0] uops_6_prs3; // @[util.scala:466:20]
reg [3:0] uops_6_ppred; // @[util.scala:466:20]
reg uops_6_prs1_busy; // @[util.scala:466:20]
reg uops_6_prs2_busy; // @[util.scala:466:20]
reg uops_6_prs3_busy; // @[util.scala:466:20]
reg uops_6_ppred_busy; // @[util.scala:466:20]
reg [6:0] uops_6_stale_pdst; // @[util.scala:466:20]
reg uops_6_exception; // @[util.scala:466:20]
reg [63:0] uops_6_exc_cause; // @[util.scala:466:20]
reg uops_6_bypassable; // @[util.scala:466:20]
reg [4:0] uops_6_mem_cmd; // @[util.scala:466:20]
reg [1:0] uops_6_mem_size; // @[util.scala:466:20]
reg uops_6_mem_signed; // @[util.scala:466:20]
reg uops_6_is_fence; // @[util.scala:466:20]
reg uops_6_is_fencei; // @[util.scala:466:20]
reg uops_6_is_amo; // @[util.scala:466:20]
reg uops_6_uses_ldq; // @[util.scala:466:20]
reg uops_6_uses_stq; // @[util.scala:466:20]
reg uops_6_is_sys_pc2epc; // @[util.scala:466:20]
reg uops_6_is_unique; // @[util.scala:466:20]
reg uops_6_flush_on_commit; // @[util.scala:466:20]
reg uops_6_ldst_is_rs1; // @[util.scala:466:20]
reg [5:0] uops_6_ldst; // @[util.scala:466:20]
reg [5:0] uops_6_lrs1; // @[util.scala:466:20]
reg [5:0] uops_6_lrs2; // @[util.scala:466:20]
reg [5:0] uops_6_lrs3; // @[util.scala:466:20]
reg uops_6_ldst_val; // @[util.scala:466:20]
reg [1:0] uops_6_dst_rtype; // @[util.scala:466:20]
reg [1:0] uops_6_lrs1_rtype; // @[util.scala:466:20]
reg [1:0] uops_6_lrs2_rtype; // @[util.scala:466:20]
reg uops_6_frs3_en; // @[util.scala:466:20]
reg uops_6_fp_val; // @[util.scala:466:20]
reg uops_6_fp_single; // @[util.scala:466:20]
reg uops_6_xcpt_pf_if; // @[util.scala:466:20]
reg uops_6_xcpt_ae_if; // @[util.scala:466:20]
reg uops_6_xcpt_ma_if; // @[util.scala:466:20]
reg uops_6_bp_debug_if; // @[util.scala:466:20]
reg uops_6_bp_xcpt_if; // @[util.scala:466:20]
reg [1:0] uops_6_debug_fsrc; // @[util.scala:466:20]
reg [1:0] uops_6_debug_tsrc; // @[util.scala:466:20]
reg [6:0] uops_7_uopc; // @[util.scala:466:20]
reg [31:0] uops_7_inst; // @[util.scala:466:20]
reg [31:0] uops_7_debug_inst; // @[util.scala:466:20]
reg uops_7_is_rvc; // @[util.scala:466:20]
reg [33:0] uops_7_debug_pc; // @[util.scala:466:20]
reg [2:0] uops_7_iq_type; // @[util.scala:466:20]
reg [9:0] uops_7_fu_code; // @[util.scala:466:20]
reg [3:0] uops_7_ctrl_br_type; // @[util.scala:466:20]
reg [1:0] uops_7_ctrl_op1_sel; // @[util.scala:466:20]
reg [2:0] uops_7_ctrl_op2_sel; // @[util.scala:466:20]
reg [2:0] uops_7_ctrl_imm_sel; // @[util.scala:466:20]
reg [4:0] uops_7_ctrl_op_fcn; // @[util.scala:466:20]
reg uops_7_ctrl_fcn_dw; // @[util.scala:466:20]
reg [2:0] uops_7_ctrl_csr_cmd; // @[util.scala:466:20]
reg uops_7_ctrl_is_load; // @[util.scala:466:20]
reg uops_7_ctrl_is_sta; // @[util.scala:466:20]
reg uops_7_ctrl_is_std; // @[util.scala:466:20]
reg [1:0] uops_7_iw_state; // @[util.scala:466:20]
reg uops_7_iw_p1_poisoned; // @[util.scala:466:20]
reg uops_7_iw_p2_poisoned; // @[util.scala:466:20]
reg uops_7_is_br; // @[util.scala:466:20]
reg uops_7_is_jalr; // @[util.scala:466:20]
reg uops_7_is_jal; // @[util.scala:466:20]
reg uops_7_is_sfb; // @[util.scala:466:20]
reg [3:0] uops_7_br_mask; // @[util.scala:466:20]
wire [3:0] _uops_7_br_mask_T_1 = uops_7_br_mask; // @[util.scala:89:21, :466:20]
reg [1:0] uops_7_br_tag; // @[util.scala:466:20]
reg [3:0] uops_7_ftq_idx; // @[util.scala:466:20]
reg uops_7_edge_inst; // @[util.scala:466:20]
reg [5:0] uops_7_pc_lob; // @[util.scala:466:20]
reg uops_7_taken; // @[util.scala:466:20]
reg [19:0] uops_7_imm_packed; // @[util.scala:466:20]
reg [11:0] uops_7_csr_addr; // @[util.scala:466:20]
reg [5:0] uops_7_rob_idx; // @[util.scala:466:20]
reg [3:0] uops_7_ldq_idx; // @[util.scala:466:20]
reg [3:0] uops_7_stq_idx; // @[util.scala:466:20]
reg [1:0] uops_7_rxq_idx; // @[util.scala:466:20]
reg [6:0] uops_7_pdst; // @[util.scala:466:20]
reg [6:0] uops_7_prs1; // @[util.scala:466:20]
reg [6:0] uops_7_prs2; // @[util.scala:466:20]
reg [6:0] uops_7_prs3; // @[util.scala:466:20]
reg [3:0] uops_7_ppred; // @[util.scala:466:20]
reg uops_7_prs1_busy; // @[util.scala:466:20]
reg uops_7_prs2_busy; // @[util.scala:466:20]
reg uops_7_prs3_busy; // @[util.scala:466:20]
reg uops_7_ppred_busy; // @[util.scala:466:20]
reg [6:0] uops_7_stale_pdst; // @[util.scala:466:20]
reg uops_7_exception; // @[util.scala:466:20]
reg [63:0] uops_7_exc_cause; // @[util.scala:466:20]
reg uops_7_bypassable; // @[util.scala:466:20]
reg [4:0] uops_7_mem_cmd; // @[util.scala:466:20]
reg [1:0] uops_7_mem_size; // @[util.scala:466:20]
reg uops_7_mem_signed; // @[util.scala:466:20]
reg uops_7_is_fence; // @[util.scala:466:20]
reg uops_7_is_fencei; // @[util.scala:466:20]
reg uops_7_is_amo; // @[util.scala:466:20]
reg uops_7_uses_ldq; // @[util.scala:466:20]
reg uops_7_uses_stq; // @[util.scala:466:20]
reg uops_7_is_sys_pc2epc; // @[util.scala:466:20]
reg uops_7_is_unique; // @[util.scala:466:20]
reg uops_7_flush_on_commit; // @[util.scala:466:20]
reg uops_7_ldst_is_rs1; // @[util.scala:466:20]
reg [5:0] uops_7_ldst; // @[util.scala:466:20]
reg [5:0] uops_7_lrs1; // @[util.scala:466:20]
reg [5:0] uops_7_lrs2; // @[util.scala:466:20]
reg [5:0] uops_7_lrs3; // @[util.scala:466:20]
reg uops_7_ldst_val; // @[util.scala:466:20]
reg [1:0] uops_7_dst_rtype; // @[util.scala:466:20]
reg [1:0] uops_7_lrs1_rtype; // @[util.scala:466:20]
reg [1:0] uops_7_lrs2_rtype; // @[util.scala:466:20]
reg uops_7_frs3_en; // @[util.scala:466:20]
reg uops_7_fp_val; // @[util.scala:466:20]
reg uops_7_fp_single; // @[util.scala:466:20]
reg uops_7_xcpt_pf_if; // @[util.scala:466:20]
reg uops_7_xcpt_ae_if; // @[util.scala:466:20]
reg uops_7_xcpt_ma_if; // @[util.scala:466:20]
reg uops_7_bp_debug_if; // @[util.scala:466:20]
reg uops_7_bp_xcpt_if; // @[util.scala:466:20]
reg [1:0] uops_7_debug_fsrc; // @[util.scala:466:20]
reg [1:0] uops_7_debug_tsrc; // @[util.scala:466:20]
reg [6:0] uops_8_uopc; // @[util.scala:466:20]
reg [31:0] uops_8_inst; // @[util.scala:466:20]
reg [31:0] uops_8_debug_inst; // @[util.scala:466:20]
reg uops_8_is_rvc; // @[util.scala:466:20]
reg [33:0] uops_8_debug_pc; // @[util.scala:466:20]
reg [2:0] uops_8_iq_type; // @[util.scala:466:20]
reg [9:0] uops_8_fu_code; // @[util.scala:466:20]
reg [3:0] uops_8_ctrl_br_type; // @[util.scala:466:20]
reg [1:0] uops_8_ctrl_op1_sel; // @[util.scala:466:20]
reg [2:0] uops_8_ctrl_op2_sel; // @[util.scala:466:20]
reg [2:0] uops_8_ctrl_imm_sel; // @[util.scala:466:20]
reg [4:0] uops_8_ctrl_op_fcn; // @[util.scala:466:20]
reg uops_8_ctrl_fcn_dw; // @[util.scala:466:20]
reg [2:0] uops_8_ctrl_csr_cmd; // @[util.scala:466:20]
reg uops_8_ctrl_is_load; // @[util.scala:466:20]
reg uops_8_ctrl_is_sta; // @[util.scala:466:20]
reg uops_8_ctrl_is_std; // @[util.scala:466:20]
reg [1:0] uops_8_iw_state; // @[util.scala:466:20]
reg uops_8_iw_p1_poisoned; // @[util.scala:466:20]
reg uops_8_iw_p2_poisoned; // @[util.scala:466:20]
reg uops_8_is_br; // @[util.scala:466:20]
reg uops_8_is_jalr; // @[util.scala:466:20]
reg uops_8_is_jal; // @[util.scala:466:20]
reg uops_8_is_sfb; // @[util.scala:466:20]
reg [3:0] uops_8_br_mask; // @[util.scala:466:20]
wire [3:0] _uops_8_br_mask_T_1 = uops_8_br_mask; // @[util.scala:89:21, :466:20]
reg [1:0] uops_8_br_tag; // @[util.scala:466:20]
reg [3:0] uops_8_ftq_idx; // @[util.scala:466:20]
reg uops_8_edge_inst; // @[util.scala:466:20]
reg [5:0] uops_8_pc_lob; // @[util.scala:466:20]
reg uops_8_taken; // @[util.scala:466:20]
reg [19:0] uops_8_imm_packed; // @[util.scala:466:20]
reg [11:0] uops_8_csr_addr; // @[util.scala:466:20]
reg [5:0] uops_8_rob_idx; // @[util.scala:466:20]
reg [3:0] uops_8_ldq_idx; // @[util.scala:466:20]
reg [3:0] uops_8_stq_idx; // @[util.scala:466:20]
reg [1:0] uops_8_rxq_idx; // @[util.scala:466:20]
reg [6:0] uops_8_pdst; // @[util.scala:466:20]
reg [6:0] uops_8_prs1; // @[util.scala:466:20]
reg [6:0] uops_8_prs2; // @[util.scala:466:20]
reg [6:0] uops_8_prs3; // @[util.scala:466:20]
reg [3:0] uops_8_ppred; // @[util.scala:466:20]
reg uops_8_prs1_busy; // @[util.scala:466:20]
reg uops_8_prs2_busy; // @[util.scala:466:20]
reg uops_8_prs3_busy; // @[util.scala:466:20]
reg uops_8_ppred_busy; // @[util.scala:466:20]
reg [6:0] uops_8_stale_pdst; // @[util.scala:466:20]
reg uops_8_exception; // @[util.scala:466:20]
reg [63:0] uops_8_exc_cause; // @[util.scala:466:20]
reg uops_8_bypassable; // @[util.scala:466:20]
reg [4:0] uops_8_mem_cmd; // @[util.scala:466:20]
reg [1:0] uops_8_mem_size; // @[util.scala:466:20]
reg uops_8_mem_signed; // @[util.scala:466:20]
reg uops_8_is_fence; // @[util.scala:466:20]
reg uops_8_is_fencei; // @[util.scala:466:20]
reg uops_8_is_amo; // @[util.scala:466:20]
reg uops_8_uses_ldq; // @[util.scala:466:20]
reg uops_8_uses_stq; // @[util.scala:466:20]
reg uops_8_is_sys_pc2epc; // @[util.scala:466:20]
reg uops_8_is_unique; // @[util.scala:466:20]
reg uops_8_flush_on_commit; // @[util.scala:466:20]
reg uops_8_ldst_is_rs1; // @[util.scala:466:20]
reg [5:0] uops_8_ldst; // @[util.scala:466:20]
reg [5:0] uops_8_lrs1; // @[util.scala:466:20]
reg [5:0] uops_8_lrs2; // @[util.scala:466:20]
reg [5:0] uops_8_lrs3; // @[util.scala:466:20]
reg uops_8_ldst_val; // @[util.scala:466:20]
reg [1:0] uops_8_dst_rtype; // @[util.scala:466:20]
reg [1:0] uops_8_lrs1_rtype; // @[util.scala:466:20]
reg [1:0] uops_8_lrs2_rtype; // @[util.scala:466:20]
reg uops_8_frs3_en; // @[util.scala:466:20]
reg uops_8_fp_val; // @[util.scala:466:20]
reg uops_8_fp_single; // @[util.scala:466:20]
reg uops_8_xcpt_pf_if; // @[util.scala:466:20]
reg uops_8_xcpt_ae_if; // @[util.scala:466:20]
reg uops_8_xcpt_ma_if; // @[util.scala:466:20]
reg uops_8_bp_debug_if; // @[util.scala:466:20]
reg uops_8_bp_xcpt_if; // @[util.scala:466:20]
reg [1:0] uops_8_debug_fsrc; // @[util.scala:466:20]
reg [1:0] uops_8_debug_tsrc; // @[util.scala:466:20]
reg [6:0] uops_9_uopc; // @[util.scala:466:20]
reg [31:0] uops_9_inst; // @[util.scala:466:20]
reg [31:0] uops_9_debug_inst; // @[util.scala:466:20]
reg uops_9_is_rvc; // @[util.scala:466:20]
reg [33:0] uops_9_debug_pc; // @[util.scala:466:20]
reg [2:0] uops_9_iq_type; // @[util.scala:466:20]
reg [9:0] uops_9_fu_code; // @[util.scala:466:20]
reg [3:0] uops_9_ctrl_br_type; // @[util.scala:466:20]
reg [1:0] uops_9_ctrl_op1_sel; // @[util.scala:466:20]
reg [2:0] uops_9_ctrl_op2_sel; // @[util.scala:466:20]
reg [2:0] uops_9_ctrl_imm_sel; // @[util.scala:466:20]
reg [4:0] uops_9_ctrl_op_fcn; // @[util.scala:466:20]
reg uops_9_ctrl_fcn_dw; // @[util.scala:466:20]
reg [2:0] uops_9_ctrl_csr_cmd; // @[util.scala:466:20]
reg uops_9_ctrl_is_load; // @[util.scala:466:20]
reg uops_9_ctrl_is_sta; // @[util.scala:466:20]
reg uops_9_ctrl_is_std; // @[util.scala:466:20]
reg [1:0] uops_9_iw_state; // @[util.scala:466:20]
reg uops_9_iw_p1_poisoned; // @[util.scala:466:20]
reg uops_9_iw_p2_poisoned; // @[util.scala:466:20]
reg uops_9_is_br; // @[util.scala:466:20]
reg uops_9_is_jalr; // @[util.scala:466:20]
reg uops_9_is_jal; // @[util.scala:466:20]
reg uops_9_is_sfb; // @[util.scala:466:20]
reg [3:0] uops_9_br_mask; // @[util.scala:466:20]
wire [3:0] _uops_9_br_mask_T_1 = uops_9_br_mask; // @[util.scala:89:21, :466:20]
reg [1:0] uops_9_br_tag; // @[util.scala:466:20]
reg [3:0] uops_9_ftq_idx; // @[util.scala:466:20]
reg uops_9_edge_inst; // @[util.scala:466:20]
reg [5:0] uops_9_pc_lob; // @[util.scala:466:20]
reg uops_9_taken; // @[util.scala:466:20]
reg [19:0] uops_9_imm_packed; // @[util.scala:466:20]
reg [11:0] uops_9_csr_addr; // @[util.scala:466:20]
reg [5:0] uops_9_rob_idx; // @[util.scala:466:20]
reg [3:0] uops_9_ldq_idx; // @[util.scala:466:20]
reg [3:0] uops_9_stq_idx; // @[util.scala:466:20]
reg [1:0] uops_9_rxq_idx; // @[util.scala:466:20]
reg [6:0] uops_9_pdst; // @[util.scala:466:20]
reg [6:0] uops_9_prs1; // @[util.scala:466:20]
reg [6:0] uops_9_prs2; // @[util.scala:466:20]
reg [6:0] uops_9_prs3; // @[util.scala:466:20]
reg [3:0] uops_9_ppred; // @[util.scala:466:20]
reg uops_9_prs1_busy; // @[util.scala:466:20]
reg uops_9_prs2_busy; // @[util.scala:466:20]
reg uops_9_prs3_busy; // @[util.scala:466:20]
reg uops_9_ppred_busy; // @[util.scala:466:20]
reg [6:0] uops_9_stale_pdst; // @[util.scala:466:20]
reg uops_9_exception; // @[util.scala:466:20]
reg [63:0] uops_9_exc_cause; // @[util.scala:466:20]
reg uops_9_bypassable; // @[util.scala:466:20]
reg [4:0] uops_9_mem_cmd; // @[util.scala:466:20]
reg [1:0] uops_9_mem_size; // @[util.scala:466:20]
reg uops_9_mem_signed; // @[util.scala:466:20]
reg uops_9_is_fence; // @[util.scala:466:20]
reg uops_9_is_fencei; // @[util.scala:466:20]
reg uops_9_is_amo; // @[util.scala:466:20]
reg uops_9_uses_ldq; // @[util.scala:466:20]
reg uops_9_uses_stq; // @[util.scala:466:20]
reg uops_9_is_sys_pc2epc; // @[util.scala:466:20]
reg uops_9_is_unique; // @[util.scala:466:20]
reg uops_9_flush_on_commit; // @[util.scala:466:20]
reg uops_9_ldst_is_rs1; // @[util.scala:466:20]
reg [5:0] uops_9_ldst; // @[util.scala:466:20]
reg [5:0] uops_9_lrs1; // @[util.scala:466:20]
reg [5:0] uops_9_lrs2; // @[util.scala:466:20]
reg [5:0] uops_9_lrs3; // @[util.scala:466:20]
reg uops_9_ldst_val; // @[util.scala:466:20]
reg [1:0] uops_9_dst_rtype; // @[util.scala:466:20]
reg [1:0] uops_9_lrs1_rtype; // @[util.scala:466:20]
reg [1:0] uops_9_lrs2_rtype; // @[util.scala:466:20]
reg uops_9_frs3_en; // @[util.scala:466:20]
reg uops_9_fp_val; // @[util.scala:466:20]
reg uops_9_fp_single; // @[util.scala:466:20]
reg uops_9_xcpt_pf_if; // @[util.scala:466:20]
reg uops_9_xcpt_ae_if; // @[util.scala:466:20]
reg uops_9_xcpt_ma_if; // @[util.scala:466:20]
reg uops_9_bp_debug_if; // @[util.scala:466:20]
reg uops_9_bp_xcpt_if; // @[util.scala:466:20]
reg [1:0] uops_9_debug_fsrc; // @[util.scala:466:20]
reg [1:0] uops_9_debug_tsrc; // @[util.scala:466:20]
reg [6:0] uops_10_uopc; // @[util.scala:466:20]
reg [31:0] uops_10_inst; // @[util.scala:466:20]
reg [31:0] uops_10_debug_inst; // @[util.scala:466:20]
reg uops_10_is_rvc; // @[util.scala:466:20]
reg [33:0] uops_10_debug_pc; // @[util.scala:466:20]
reg [2:0] uops_10_iq_type; // @[util.scala:466:20]
reg [9:0] uops_10_fu_code; // @[util.scala:466:20]
reg [3:0] uops_10_ctrl_br_type; // @[util.scala:466:20]
reg [1:0] uops_10_ctrl_op1_sel; // @[util.scala:466:20]
reg [2:0] uops_10_ctrl_op2_sel; // @[util.scala:466:20]
reg [2:0] uops_10_ctrl_imm_sel; // @[util.scala:466:20]
reg [4:0] uops_10_ctrl_op_fcn; // @[util.scala:466:20]
reg uops_10_ctrl_fcn_dw; // @[util.scala:466:20]
reg [2:0] uops_10_ctrl_csr_cmd; // @[util.scala:466:20]
reg uops_10_ctrl_is_load; // @[util.scala:466:20]
reg uops_10_ctrl_is_sta; // @[util.scala:466:20]
reg uops_10_ctrl_is_std; // @[util.scala:466:20]
reg [1:0] uops_10_iw_state; // @[util.scala:466:20]
reg uops_10_iw_p1_poisoned; // @[util.scala:466:20]
reg uops_10_iw_p2_poisoned; // @[util.scala:466:20]
reg uops_10_is_br; // @[util.scala:466:20]
reg uops_10_is_jalr; // @[util.scala:466:20]
reg uops_10_is_jal; // @[util.scala:466:20]
reg uops_10_is_sfb; // @[util.scala:466:20]
reg [3:0] uops_10_br_mask; // @[util.scala:466:20]
wire [3:0] _uops_10_br_mask_T_1 = uops_10_br_mask; // @[util.scala:89:21, :466:20]
reg [1:0] uops_10_br_tag; // @[util.scala:466:20]
reg [3:0] uops_10_ftq_idx; // @[util.scala:466:20]
reg uops_10_edge_inst; // @[util.scala:466:20]
reg [5:0] uops_10_pc_lob; // @[util.scala:466:20]
reg uops_10_taken; // @[util.scala:466:20]
reg [19:0] uops_10_imm_packed; // @[util.scala:466:20]
reg [11:0] uops_10_csr_addr; // @[util.scala:466:20]
reg [5:0] uops_10_rob_idx; // @[util.scala:466:20]
reg [3:0] uops_10_ldq_idx; // @[util.scala:466:20]
reg [3:0] uops_10_stq_idx; // @[util.scala:466:20]
reg [1:0] uops_10_rxq_idx; // @[util.scala:466:20]
reg [6:0] uops_10_pdst; // @[util.scala:466:20]
reg [6:0] uops_10_prs1; // @[util.scala:466:20]
reg [6:0] uops_10_prs2; // @[util.scala:466:20]
reg [6:0] uops_10_prs3; // @[util.scala:466:20]
reg [3:0] uops_10_ppred; // @[util.scala:466:20]
reg uops_10_prs1_busy; // @[util.scala:466:20]
reg uops_10_prs2_busy; // @[util.scala:466:20]
reg uops_10_prs3_busy; // @[util.scala:466:20]
reg uops_10_ppred_busy; // @[util.scala:466:20]
reg [6:0] uops_10_stale_pdst; // @[util.scala:466:20]
reg uops_10_exception; // @[util.scala:466:20]
reg [63:0] uops_10_exc_cause; // @[util.scala:466:20]
reg uops_10_bypassable; // @[util.scala:466:20]
reg [4:0] uops_10_mem_cmd; // @[util.scala:466:20]
reg [1:0] uops_10_mem_size; // @[util.scala:466:20]
reg uops_10_mem_signed; // @[util.scala:466:20]
reg uops_10_is_fence; // @[util.scala:466:20]
reg uops_10_is_fencei; // @[util.scala:466:20]
reg uops_10_is_amo; // @[util.scala:466:20]
reg uops_10_uses_ldq; // @[util.scala:466:20]
reg uops_10_uses_stq; // @[util.scala:466:20]
reg uops_10_is_sys_pc2epc; // @[util.scala:466:20]
reg uops_10_is_unique; // @[util.scala:466:20]
reg uops_10_flush_on_commit; // @[util.scala:466:20]
reg uops_10_ldst_is_rs1; // @[util.scala:466:20]
reg [5:0] uops_10_ldst; // @[util.scala:466:20]
reg [5:0] uops_10_lrs1; // @[util.scala:466:20]
reg [5:0] uops_10_lrs2; // @[util.scala:466:20]
reg [5:0] uops_10_lrs3; // @[util.scala:466:20]
reg uops_10_ldst_val; // @[util.scala:466:20]
reg [1:0] uops_10_dst_rtype; // @[util.scala:466:20]
reg [1:0] uops_10_lrs1_rtype; // @[util.scala:466:20]
reg [1:0] uops_10_lrs2_rtype; // @[util.scala:466:20]
reg uops_10_frs3_en; // @[util.scala:466:20]
reg uops_10_fp_val; // @[util.scala:466:20]
reg uops_10_fp_single; // @[util.scala:466:20]
reg uops_10_xcpt_pf_if; // @[util.scala:466:20]
reg uops_10_xcpt_ae_if; // @[util.scala:466:20]
reg uops_10_xcpt_ma_if; // @[util.scala:466:20]
reg uops_10_bp_debug_if; // @[util.scala:466:20]
reg uops_10_bp_xcpt_if; // @[util.scala:466:20]
reg [1:0] uops_10_debug_fsrc; // @[util.scala:466:20]
reg [1:0] uops_10_debug_tsrc; // @[util.scala:466:20]
reg [6:0] uops_11_uopc; // @[util.scala:466:20]
reg [31:0] uops_11_inst; // @[util.scala:466:20]
reg [31:0] uops_11_debug_inst; // @[util.scala:466:20]
reg uops_11_is_rvc; // @[util.scala:466:20]
reg [33:0] uops_11_debug_pc; // @[util.scala:466:20]
reg [2:0] uops_11_iq_type; // @[util.scala:466:20]
reg [9:0] uops_11_fu_code; // @[util.scala:466:20]
reg [3:0] uops_11_ctrl_br_type; // @[util.scala:466:20]
reg [1:0] uops_11_ctrl_op1_sel; // @[util.scala:466:20]
reg [2:0] uops_11_ctrl_op2_sel; // @[util.scala:466:20]
reg [2:0] uops_11_ctrl_imm_sel; // @[util.scala:466:20]
reg [4:0] uops_11_ctrl_op_fcn; // @[util.scala:466:20]
reg uops_11_ctrl_fcn_dw; // @[util.scala:466:20]
reg [2:0] uops_11_ctrl_csr_cmd; // @[util.scala:466:20]
reg uops_11_ctrl_is_load; // @[util.scala:466:20]
reg uops_11_ctrl_is_sta; // @[util.scala:466:20]
reg uops_11_ctrl_is_std; // @[util.scala:466:20]
reg [1:0] uops_11_iw_state; // @[util.scala:466:20]
reg uops_11_iw_p1_poisoned; // @[util.scala:466:20]
reg uops_11_iw_p2_poisoned; // @[util.scala:466:20]
reg uops_11_is_br; // @[util.scala:466:20]
reg uops_11_is_jalr; // @[util.scala:466:20]
reg uops_11_is_jal; // @[util.scala:466:20]
reg uops_11_is_sfb; // @[util.scala:466:20]
reg [3:0] uops_11_br_mask; // @[util.scala:466:20]
wire [3:0] _uops_11_br_mask_T_1 = uops_11_br_mask; // @[util.scala:89:21, :466:20]
reg [1:0] uops_11_br_tag; // @[util.scala:466:20]
reg [3:0] uops_11_ftq_idx; // @[util.scala:466:20]
reg uops_11_edge_inst; // @[util.scala:466:20]
reg [5:0] uops_11_pc_lob; // @[util.scala:466:20]
reg uops_11_taken; // @[util.scala:466:20]
reg [19:0] uops_11_imm_packed; // @[util.scala:466:20]
reg [11:0] uops_11_csr_addr; // @[util.scala:466:20]
reg [5:0] uops_11_rob_idx; // @[util.scala:466:20]
reg [3:0] uops_11_ldq_idx; // @[util.scala:466:20]
reg [3:0] uops_11_stq_idx; // @[util.scala:466:20]
reg [1:0] uops_11_rxq_idx; // @[util.scala:466:20]
reg [6:0] uops_11_pdst; // @[util.scala:466:20]
reg [6:0] uops_11_prs1; // @[util.scala:466:20]
reg [6:0] uops_11_prs2; // @[util.scala:466:20]
reg [6:0] uops_11_prs3; // @[util.scala:466:20]
reg [3:0] uops_11_ppred; // @[util.scala:466:20]
reg uops_11_prs1_busy; // @[util.scala:466:20]
reg uops_11_prs2_busy; // @[util.scala:466:20]
reg uops_11_prs3_busy; // @[util.scala:466:20]
reg uops_11_ppred_busy; // @[util.scala:466:20]
reg [6:0] uops_11_stale_pdst; // @[util.scala:466:20]
reg uops_11_exception; // @[util.scala:466:20]
reg [63:0] uops_11_exc_cause; // @[util.scala:466:20]
reg uops_11_bypassable; // @[util.scala:466:20]
reg [4:0] uops_11_mem_cmd; // @[util.scala:466:20]
reg [1:0] uops_11_mem_size; // @[util.scala:466:20]
reg uops_11_mem_signed; // @[util.scala:466:20]
reg uops_11_is_fence; // @[util.scala:466:20]
reg uops_11_is_fencei; // @[util.scala:466:20]
reg uops_11_is_amo; // @[util.scala:466:20]
reg uops_11_uses_ldq; // @[util.scala:466:20]
reg uops_11_uses_stq; // @[util.scala:466:20]
reg uops_11_is_sys_pc2epc; // @[util.scala:466:20]
reg uops_11_is_unique; // @[util.scala:466:20]
reg uops_11_flush_on_commit; // @[util.scala:466:20]
reg uops_11_ldst_is_rs1; // @[util.scala:466:20]
reg [5:0] uops_11_ldst; // @[util.scala:466:20]
reg [5:0] uops_11_lrs1; // @[util.scala:466:20]
reg [5:0] uops_11_lrs2; // @[util.scala:466:20]
reg [5:0] uops_11_lrs3; // @[util.scala:466:20]
reg uops_11_ldst_val; // @[util.scala:466:20]
reg [1:0] uops_11_dst_rtype; // @[util.scala:466:20]
reg [1:0] uops_11_lrs1_rtype; // @[util.scala:466:20]
reg [1:0] uops_11_lrs2_rtype; // @[util.scala:466:20]
reg uops_11_frs3_en; // @[util.scala:466:20]
reg uops_11_fp_val; // @[util.scala:466:20]
reg uops_11_fp_single; // @[util.scala:466:20]
reg uops_11_xcpt_pf_if; // @[util.scala:466:20]
reg uops_11_xcpt_ae_if; // @[util.scala:466:20]
reg uops_11_xcpt_ma_if; // @[util.scala:466:20]
reg uops_11_bp_debug_if; // @[util.scala:466:20]
reg uops_11_bp_xcpt_if; // @[util.scala:466:20]
reg [1:0] uops_11_debug_fsrc; // @[util.scala:466:20]
reg [1:0] uops_11_debug_tsrc; // @[util.scala:466:20]
reg [6:0] uops_12_uopc; // @[util.scala:466:20]
reg [31:0] uops_12_inst; // @[util.scala:466:20]
reg [31:0] uops_12_debug_inst; // @[util.scala:466:20]
reg uops_12_is_rvc; // @[util.scala:466:20]
reg [33:0] uops_12_debug_pc; // @[util.scala:466:20]
reg [2:0] uops_12_iq_type; // @[util.scala:466:20]
reg [9:0] uops_12_fu_code; // @[util.scala:466:20]
reg [3:0] uops_12_ctrl_br_type; // @[util.scala:466:20]
reg [1:0] uops_12_ctrl_op1_sel; // @[util.scala:466:20]
reg [2:0] uops_12_ctrl_op2_sel; // @[util.scala:466:20]
reg [2:0] uops_12_ctrl_imm_sel; // @[util.scala:466:20]
reg [4:0] uops_12_ctrl_op_fcn; // @[util.scala:466:20]
reg uops_12_ctrl_fcn_dw; // @[util.scala:466:20]
reg [2:0] uops_12_ctrl_csr_cmd; // @[util.scala:466:20]
reg uops_12_ctrl_is_load; // @[util.scala:466:20]
reg uops_12_ctrl_is_sta; // @[util.scala:466:20]
reg uops_12_ctrl_is_std; // @[util.scala:466:20]
reg [1:0] uops_12_iw_state; // @[util.scala:466:20]
reg uops_12_iw_p1_poisoned; // @[util.scala:466:20]
reg uops_12_iw_p2_poisoned; // @[util.scala:466:20]
reg uops_12_is_br; // @[util.scala:466:20]
reg uops_12_is_jalr; // @[util.scala:466:20]
reg uops_12_is_jal; // @[util.scala:466:20]
reg uops_12_is_sfb; // @[util.scala:466:20]
reg [3:0] uops_12_br_mask; // @[util.scala:466:20]
wire [3:0] _uops_12_br_mask_T_1 = uops_12_br_mask; // @[util.scala:89:21, :466:20]
reg [1:0] uops_12_br_tag; // @[util.scala:466:20]
reg [3:0] uops_12_ftq_idx; // @[util.scala:466:20]
reg uops_12_edge_inst; // @[util.scala:466:20]
reg [5:0] uops_12_pc_lob; // @[util.scala:466:20]
reg uops_12_taken; // @[util.scala:466:20]
reg [19:0] uops_12_imm_packed; // @[util.scala:466:20]
reg [11:0] uops_12_csr_addr; // @[util.scala:466:20]
reg [5:0] uops_12_rob_idx; // @[util.scala:466:20]
reg [3:0] uops_12_ldq_idx; // @[util.scala:466:20]
reg [3:0] uops_12_stq_idx; // @[util.scala:466:20]
reg [1:0] uops_12_rxq_idx; // @[util.scala:466:20]
reg [6:0] uops_12_pdst; // @[util.scala:466:20]
reg [6:0] uops_12_prs1; // @[util.scala:466:20]
reg [6:0] uops_12_prs2; // @[util.scala:466:20]
reg [6:0] uops_12_prs3; // @[util.scala:466:20]
reg [3:0] uops_12_ppred; // @[util.scala:466:20]
reg uops_12_prs1_busy; // @[util.scala:466:20]
reg uops_12_prs2_busy; // @[util.scala:466:20]
reg uops_12_prs3_busy; // @[util.scala:466:20]
reg uops_12_ppred_busy; // @[util.scala:466:20]
reg [6:0] uops_12_stale_pdst; // @[util.scala:466:20]
reg uops_12_exception; // @[util.scala:466:20]
reg [63:0] uops_12_exc_cause; // @[util.scala:466:20]
reg uops_12_bypassable; // @[util.scala:466:20]
reg [4:0] uops_12_mem_cmd; // @[util.scala:466:20]
reg [1:0] uops_12_mem_size; // @[util.scala:466:20]
reg uops_12_mem_signed; // @[util.scala:466:20]
reg uops_12_is_fence; // @[util.scala:466:20]
reg uops_12_is_fencei; // @[util.scala:466:20]
reg uops_12_is_amo; // @[util.scala:466:20]
reg uops_12_uses_ldq; // @[util.scala:466:20]
reg uops_12_uses_stq; // @[util.scala:466:20]
reg uops_12_is_sys_pc2epc; // @[util.scala:466:20]
reg uops_12_is_unique; // @[util.scala:466:20]
reg uops_12_flush_on_commit; // @[util.scala:466:20]
reg uops_12_ldst_is_rs1; // @[util.scala:466:20]
reg [5:0] uops_12_ldst; // @[util.scala:466:20]
reg [5:0] uops_12_lrs1; // @[util.scala:466:20]
reg [5:0] uops_12_lrs2; // @[util.scala:466:20]
reg [5:0] uops_12_lrs3; // @[util.scala:466:20]
reg uops_12_ldst_val; // @[util.scala:466:20]
reg [1:0] uops_12_dst_rtype; // @[util.scala:466:20]
reg [1:0] uops_12_lrs1_rtype; // @[util.scala:466:20]
reg [1:0] uops_12_lrs2_rtype; // @[util.scala:466:20]
reg uops_12_frs3_en; // @[util.scala:466:20]
reg uops_12_fp_val; // @[util.scala:466:20]
reg uops_12_fp_single; // @[util.scala:466:20]
reg uops_12_xcpt_pf_if; // @[util.scala:466:20]
reg uops_12_xcpt_ae_if; // @[util.scala:466:20]
reg uops_12_xcpt_ma_if; // @[util.scala:466:20]
reg uops_12_bp_debug_if; // @[util.scala:466:20]
reg uops_12_bp_xcpt_if; // @[util.scala:466:20]
reg [1:0] uops_12_debug_fsrc; // @[util.scala:466:20]
reg [1:0] uops_12_debug_tsrc; // @[util.scala:466:20]
reg [6:0] uops_13_uopc; // @[util.scala:466:20]
reg [31:0] uops_13_inst; // @[util.scala:466:20]
reg [31:0] uops_13_debug_inst; // @[util.scala:466:20]
reg uops_13_is_rvc; // @[util.scala:466:20]
reg [33:0] uops_13_debug_pc; // @[util.scala:466:20]
reg [2:0] uops_13_iq_type; // @[util.scala:466:20]
reg [9:0] uops_13_fu_code; // @[util.scala:466:20]
reg [3:0] uops_13_ctrl_br_type; // @[util.scala:466:20]
reg [1:0] uops_13_ctrl_op1_sel; // @[util.scala:466:20]
reg [2:0] uops_13_ctrl_op2_sel; // @[util.scala:466:20]
reg [2:0] uops_13_ctrl_imm_sel; // @[util.scala:466:20]
reg [4:0] uops_13_ctrl_op_fcn; // @[util.scala:466:20]
reg uops_13_ctrl_fcn_dw; // @[util.scala:466:20]
reg [2:0] uops_13_ctrl_csr_cmd; // @[util.scala:466:20]
reg uops_13_ctrl_is_load; // @[util.scala:466:20]
reg uops_13_ctrl_is_sta; // @[util.scala:466:20]
reg uops_13_ctrl_is_std; // @[util.scala:466:20]
reg [1:0] uops_13_iw_state; // @[util.scala:466:20]
reg uops_13_iw_p1_poisoned; // @[util.scala:466:20]
reg uops_13_iw_p2_poisoned; // @[util.scala:466:20]
reg uops_13_is_br; // @[util.scala:466:20]
reg uops_13_is_jalr; // @[util.scala:466:20]
reg uops_13_is_jal; // @[util.scala:466:20]
reg uops_13_is_sfb; // @[util.scala:466:20]
reg [3:0] uops_13_br_mask; // @[util.scala:466:20]
wire [3:0] _uops_13_br_mask_T_1 = uops_13_br_mask; // @[util.scala:89:21, :466:20]
reg [1:0] uops_13_br_tag; // @[util.scala:466:20]
reg [3:0] uops_13_ftq_idx; // @[util.scala:466:20]
reg uops_13_edge_inst; // @[util.scala:466:20]
reg [5:0] uops_13_pc_lob; // @[util.scala:466:20]
reg uops_13_taken; // @[util.scala:466:20]
reg [19:0] uops_13_imm_packed; // @[util.scala:466:20]
reg [11:0] uops_13_csr_addr; // @[util.scala:466:20]
reg [5:0] uops_13_rob_idx; // @[util.scala:466:20]
reg [3:0] uops_13_ldq_idx; // @[util.scala:466:20]
reg [3:0] uops_13_stq_idx; // @[util.scala:466:20]
reg [1:0] uops_13_rxq_idx; // @[util.scala:466:20]
reg [6:0] uops_13_pdst; // @[util.scala:466:20]
reg [6:0] uops_13_prs1; // @[util.scala:466:20]
reg [6:0] uops_13_prs2; // @[util.scala:466:20]
reg [6:0] uops_13_prs3; // @[util.scala:466:20]
reg [3:0] uops_13_ppred; // @[util.scala:466:20]
reg uops_13_prs1_busy; // @[util.scala:466:20]
reg uops_13_prs2_busy; // @[util.scala:466:20]
reg uops_13_prs3_busy; // @[util.scala:466:20]
reg uops_13_ppred_busy; // @[util.scala:466:20]
reg [6:0] uops_13_stale_pdst; // @[util.scala:466:20]
reg uops_13_exception; // @[util.scala:466:20]
reg [63:0] uops_13_exc_cause; // @[util.scala:466:20]
reg uops_13_bypassable; // @[util.scala:466:20]
reg [4:0] uops_13_mem_cmd; // @[util.scala:466:20]
reg [1:0] uops_13_mem_size; // @[util.scala:466:20]
reg uops_13_mem_signed; // @[util.scala:466:20]
reg uops_13_is_fence; // @[util.scala:466:20]
reg uops_13_is_fencei; // @[util.scala:466:20]
reg uops_13_is_amo; // @[util.scala:466:20]
reg uops_13_uses_ldq; // @[util.scala:466:20]
reg uops_13_uses_stq; // @[util.scala:466:20]
reg uops_13_is_sys_pc2epc; // @[util.scala:466:20]
reg uops_13_is_unique; // @[util.scala:466:20]
reg uops_13_flush_on_commit; // @[util.scala:466:20]
reg uops_13_ldst_is_rs1; // @[util.scala:466:20]
reg [5:0] uops_13_ldst; // @[util.scala:466:20]
reg [5:0] uops_13_lrs1; // @[util.scala:466:20]
reg [5:0] uops_13_lrs2; // @[util.scala:466:20]
reg [5:0] uops_13_lrs3; // @[util.scala:466:20]
reg uops_13_ldst_val; // @[util.scala:466:20]
reg [1:0] uops_13_dst_rtype; // @[util.scala:466:20]
reg [1:0] uops_13_lrs1_rtype; // @[util.scala:466:20]
reg [1:0] uops_13_lrs2_rtype; // @[util.scala:466:20]
reg uops_13_frs3_en; // @[util.scala:466:20]
reg uops_13_fp_val; // @[util.scala:466:20]
reg uops_13_fp_single; // @[util.scala:466:20]
reg uops_13_xcpt_pf_if; // @[util.scala:466:20]
reg uops_13_xcpt_ae_if; // @[util.scala:466:20]
reg uops_13_xcpt_ma_if; // @[util.scala:466:20]
reg uops_13_bp_debug_if; // @[util.scala:466:20]
reg uops_13_bp_xcpt_if; // @[util.scala:466:20]
reg [1:0] uops_13_debug_fsrc; // @[util.scala:466:20]
reg [1:0] uops_13_debug_tsrc; // @[util.scala:466:20]
reg [6:0] uops_14_uopc; // @[util.scala:466:20]
reg [31:0] uops_14_inst; // @[util.scala:466:20]
reg [31:0] uops_14_debug_inst; // @[util.scala:466:20]
reg uops_14_is_rvc; // @[util.scala:466:20]
reg [33:0] uops_14_debug_pc; // @[util.scala:466:20]
reg [2:0] uops_14_iq_type; // @[util.scala:466:20]
reg [9:0] uops_14_fu_code; // @[util.scala:466:20]
reg [3:0] uops_14_ctrl_br_type; // @[util.scala:466:20]
reg [1:0] uops_14_ctrl_op1_sel; // @[util.scala:466:20]
reg [2:0] uops_14_ctrl_op2_sel; // @[util.scala:466:20]
reg [2:0] uops_14_ctrl_imm_sel; // @[util.scala:466:20]
reg [4:0] uops_14_ctrl_op_fcn; // @[util.scala:466:20]
reg uops_14_ctrl_fcn_dw; // @[util.scala:466:20]
reg [2:0] uops_14_ctrl_csr_cmd; // @[util.scala:466:20]
reg uops_14_ctrl_is_load; // @[util.scala:466:20]
reg uops_14_ctrl_is_sta; // @[util.scala:466:20]
reg uops_14_ctrl_is_std; // @[util.scala:466:20]
reg [1:0] uops_14_iw_state; // @[util.scala:466:20]
reg uops_14_iw_p1_poisoned; // @[util.scala:466:20]
reg uops_14_iw_p2_poisoned; // @[util.scala:466:20]
reg uops_14_is_br; // @[util.scala:466:20]
reg uops_14_is_jalr; // @[util.scala:466:20]
reg uops_14_is_jal; // @[util.scala:466:20]
reg uops_14_is_sfb; // @[util.scala:466:20]
reg [3:0] uops_14_br_mask; // @[util.scala:466:20]
wire [3:0] _uops_14_br_mask_T_1 = uops_14_br_mask; // @[util.scala:89:21, :466:20]
reg [1:0] uops_14_br_tag; // @[util.scala:466:20]
reg [3:0] uops_14_ftq_idx; // @[util.scala:466:20]
reg uops_14_edge_inst; // @[util.scala:466:20]
reg [5:0] uops_14_pc_lob; // @[util.scala:466:20]
reg uops_14_taken; // @[util.scala:466:20]
reg [19:0] uops_14_imm_packed; // @[util.scala:466:20]
reg [11:0] uops_14_csr_addr; // @[util.scala:466:20]
reg [5:0] uops_14_rob_idx; // @[util.scala:466:20]
reg [3:0] uops_14_ldq_idx; // @[util.scala:466:20]
reg [3:0] uops_14_stq_idx; // @[util.scala:466:20]
reg [1:0] uops_14_rxq_idx; // @[util.scala:466:20]
reg [6:0] uops_14_pdst; // @[util.scala:466:20]
reg [6:0] uops_14_prs1; // @[util.scala:466:20]
reg [6:0] uops_14_prs2; // @[util.scala:466:20]
reg [6:0] uops_14_prs3; // @[util.scala:466:20]
reg [3:0] uops_14_ppred; // @[util.scala:466:20]
reg uops_14_prs1_busy; // @[util.scala:466:20]
reg uops_14_prs2_busy; // @[util.scala:466:20]
reg uops_14_prs3_busy; // @[util.scala:466:20]
reg uops_14_ppred_busy; // @[util.scala:466:20]
reg [6:0] uops_14_stale_pdst; // @[util.scala:466:20]
reg uops_14_exception; // @[util.scala:466:20]
reg [63:0] uops_14_exc_cause; // @[util.scala:466:20]
reg uops_14_bypassable; // @[util.scala:466:20]
reg [4:0] uops_14_mem_cmd; // @[util.scala:466:20]
reg [1:0] uops_14_mem_size; // @[util.scala:466:20]
reg uops_14_mem_signed; // @[util.scala:466:20]
reg uops_14_is_fence; // @[util.scala:466:20]
reg uops_14_is_fencei; // @[util.scala:466:20]
reg uops_14_is_amo; // @[util.scala:466:20]
reg uops_14_uses_ldq; // @[util.scala:466:20]
reg uops_14_uses_stq; // @[util.scala:466:20]
reg uops_14_is_sys_pc2epc; // @[util.scala:466:20]
reg uops_14_is_unique; // @[util.scala:466:20]
reg uops_14_flush_on_commit; // @[util.scala:466:20]
reg uops_14_ldst_is_rs1; // @[util.scala:466:20]
reg [5:0] uops_14_ldst; // @[util.scala:466:20]
reg [5:0] uops_14_lrs1; // @[util.scala:466:20]
reg [5:0] uops_14_lrs2; // @[util.scala:466:20]
reg [5:0] uops_14_lrs3; // @[util.scala:466:20]
reg uops_14_ldst_val; // @[util.scala:466:20]
reg [1:0] uops_14_dst_rtype; // @[util.scala:466:20]
reg [1:0] uops_14_lrs1_rtype; // @[util.scala:466:20]
reg [1:0] uops_14_lrs2_rtype; // @[util.scala:466:20]
reg uops_14_frs3_en; // @[util.scala:466:20]
reg uops_14_fp_val; // @[util.scala:466:20]
reg uops_14_fp_single; // @[util.scala:466:20]
reg uops_14_xcpt_pf_if; // @[util.scala:466:20]
reg uops_14_xcpt_ae_if; // @[util.scala:466:20]
reg uops_14_xcpt_ma_if; // @[util.scala:466:20]
reg uops_14_bp_debug_if; // @[util.scala:466:20]
reg uops_14_bp_xcpt_if; // @[util.scala:466:20]
reg [1:0] uops_14_debug_fsrc; // @[util.scala:466:20]
reg [1:0] uops_14_debug_tsrc; // @[util.scala:466:20]
reg [6:0] uops_15_uopc; // @[util.scala:466:20]
reg [31:0] uops_15_inst; // @[util.scala:466:20]
reg [31:0] uops_15_debug_inst; // @[util.scala:466:20]
reg uops_15_is_rvc; // @[util.scala:466:20]
reg [33:0] uops_15_debug_pc; // @[util.scala:466:20]
reg [2:0] uops_15_iq_type; // @[util.scala:466:20]
reg [9:0] uops_15_fu_code; // @[util.scala:466:20]
reg [3:0] uops_15_ctrl_br_type; // @[util.scala:466:20]
reg [1:0] uops_15_ctrl_op1_sel; // @[util.scala:466:20]
reg [2:0] uops_15_ctrl_op2_sel; // @[util.scala:466:20]
reg [2:0] uops_15_ctrl_imm_sel; // @[util.scala:466:20]
reg [4:0] uops_15_ctrl_op_fcn; // @[util.scala:466:20]
reg uops_15_ctrl_fcn_dw; // @[util.scala:466:20]
reg [2:0] uops_15_ctrl_csr_cmd; // @[util.scala:466:20]
reg uops_15_ctrl_is_load; // @[util.scala:466:20]
reg uops_15_ctrl_is_sta; // @[util.scala:466:20]
reg uops_15_ctrl_is_std; // @[util.scala:466:20]
reg [1:0] uops_15_iw_state; // @[util.scala:466:20]
reg uops_15_iw_p1_poisoned; // @[util.scala:466:20]
reg uops_15_iw_p2_poisoned; // @[util.scala:466:20]
reg uops_15_is_br; // @[util.scala:466:20]
reg uops_15_is_jalr; // @[util.scala:466:20]
reg uops_15_is_jal; // @[util.scala:466:20]
reg uops_15_is_sfb; // @[util.scala:466:20]
reg [3:0] uops_15_br_mask; // @[util.scala:466:20]
wire [3:0] _uops_15_br_mask_T_1 = uops_15_br_mask; // @[util.scala:89:21, :466:20]
reg [1:0] uops_15_br_tag; // @[util.scala:466:20]
reg [3:0] uops_15_ftq_idx; // @[util.scala:466:20]
reg uops_15_edge_inst; // @[util.scala:466:20]
reg [5:0] uops_15_pc_lob; // @[util.scala:466:20]
reg uops_15_taken; // @[util.scala:466:20]
reg [19:0] uops_15_imm_packed; // @[util.scala:466:20]
reg [11:0] uops_15_csr_addr; // @[util.scala:466:20]
reg [5:0] uops_15_rob_idx; // @[util.scala:466:20]
reg [3:0] uops_15_ldq_idx; // @[util.scala:466:20]
reg [3:0] uops_15_stq_idx; // @[util.scala:466:20]
reg [1:0] uops_15_rxq_idx; // @[util.scala:466:20]
reg [6:0] uops_15_pdst; // @[util.scala:466:20]
reg [6:0] uops_15_prs1; // @[util.scala:466:20]
reg [6:0] uops_15_prs2; // @[util.scala:466:20]
reg [6:0] uops_15_prs3; // @[util.scala:466:20]
reg [3:0] uops_15_ppred; // @[util.scala:466:20]
reg uops_15_prs1_busy; // @[util.scala:466:20]
reg uops_15_prs2_busy; // @[util.scala:466:20]
reg uops_15_prs3_busy; // @[util.scala:466:20]
reg uops_15_ppred_busy; // @[util.scala:466:20]
reg [6:0] uops_15_stale_pdst; // @[util.scala:466:20]
reg uops_15_exception; // @[util.scala:466:20]
reg [63:0] uops_15_exc_cause; // @[util.scala:466:20]
reg uops_15_bypassable; // @[util.scala:466:20]
reg [4:0] uops_15_mem_cmd; // @[util.scala:466:20]
reg [1:0] uops_15_mem_size; // @[util.scala:466:20]
reg uops_15_mem_signed; // @[util.scala:466:20]
reg uops_15_is_fence; // @[util.scala:466:20]
reg uops_15_is_fencei; // @[util.scala:466:20]
reg uops_15_is_amo; // @[util.scala:466:20]
reg uops_15_uses_ldq; // @[util.scala:466:20]
reg uops_15_uses_stq; // @[util.scala:466:20]
reg uops_15_is_sys_pc2epc; // @[util.scala:466:20]
reg uops_15_is_unique; // @[util.scala:466:20]
reg uops_15_flush_on_commit; // @[util.scala:466:20]
reg uops_15_ldst_is_rs1; // @[util.scala:466:20]
reg [5:0] uops_15_ldst; // @[util.scala:466:20]
reg [5:0] uops_15_lrs1; // @[util.scala:466:20]
reg [5:0] uops_15_lrs2; // @[util.scala:466:20]
reg [5:0] uops_15_lrs3; // @[util.scala:466:20]
reg uops_15_ldst_val; // @[util.scala:466:20]
reg [1:0] uops_15_dst_rtype; // @[util.scala:466:20]
reg [1:0] uops_15_lrs1_rtype; // @[util.scala:466:20]
reg [1:0] uops_15_lrs2_rtype; // @[util.scala:466:20]
reg uops_15_frs3_en; // @[util.scala:466:20]
reg uops_15_fp_val; // @[util.scala:466:20]
reg uops_15_fp_single; // @[util.scala:466:20]
reg uops_15_xcpt_pf_if; // @[util.scala:466:20]
reg uops_15_xcpt_ae_if; // @[util.scala:466:20]
reg uops_15_xcpt_ma_if; // @[util.scala:466:20]
reg uops_15_bp_debug_if; // @[util.scala:466:20]
reg uops_15_bp_xcpt_if; // @[util.scala:466:20]
reg [1:0] uops_15_debug_fsrc; // @[util.scala:466:20]
reg [1:0] uops_15_debug_tsrc; // @[util.scala:466:20]
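  // Circular-buffer bookkeeping: 4-bit enqueue/dequeue pointers plus a
  // maybe_full flag distinguish the full and empty cases when the two
  // pointers match.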
reg [3:0] enq_ptr_value; // @[Counter.scala:61:40]
reg [3:0] deq_ptr_value; // @[Counter.scala:61:40]
reg maybe_full; // @[util.scala:470:27]
wire ptr_match = enq_ptr_value == deq_ptr_value; // @[Counter.scala:61:40]
wire _io_empty_T = ~maybe_full; // @[util.scala:470:27, :473:28]
assign _io_empty_T_1 = ptr_match & _io_empty_T; // @[util.scala:472:33, :473:{25,28}]
assign io_empty_0 = _io_empty_T_1; // @[util.scala:448:7, :473:25]
wire _GEN = ptr_match & maybe_full; // @[util.scala:470:27, :472:33, :474:24]
wire full; // @[util.scala:474:24]
assign full = _GEN; // @[util.scala:474:24]
wire _io_count_T; // @[util.scala:526:32]
assign _io_count_T = _GEN; // @[util.scala:474:24, :526:32]
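  // Enqueue fires on the io_enq ready/valid handshake; dequeue fires when the
  // consumer is ready (or the head entry is no longer valid) and the queue is
  // not empty.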
wire _do_enq_T = io_enq_ready_0 & io_enq_valid_0; // @[Decoupled.scala:51:35]
wire do_enq = _do_enq_T; // @[Decoupled.scala:51:35]
wire [15:0] _GEN_0 = {{valids_15}, {valids_14}, {valids_13}, {valids_12}, {valids_11}, {valids_10}, {valids_9}, {valids_8}, {valids_7}, {valids_6}, {valids_5}, {valids_4}, {valids_3}, {valids_2}, {valids_1}, {valids_0}}; // @[util.scala:465:24, :476:42]
wire _GEN_1 = _GEN_0[deq_ptr_value]; // @[Counter.scala:61:40]
wire _do_deq_T = ~_GEN_1; // @[util.scala:476:42]
wire _do_deq_T_1 = io_deq_ready_0 | _do_deq_T; // @[util.scala:448:7, :476:{39,42}]
wire _do_deq_T_2 = ~io_empty_0; // @[util.scala:448:7, :476:69]
wire _do_deq_T_3 = _do_deq_T_1 & _do_deq_T_2; // @[util.scala:476:{39,66,69}]
wire do_deq = _do_deq_T_3; // @[util.scala:476:{24,66}]
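  // Per-entry valid-bit update terms, one wire per queue slot.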
wire _valids_0_T_6 = _valids_0_T_3; // @[util.scala:481:{29,69}]
wire _valids_1_T_6 = _valids_1_T_3; // @[util.scala:481:{29,69}]
wire _valids_2_T_6 = _valids_2_T_3; // @[util.scala:481:{29,69}]
wire _valids_3_T_6 = _valids_3_T_3; // @[util.scala:481:{29,69}]
wire _valids_4_T_6 = _valids_4_T_3; // @[util.scala:481:{29,69}]
wire _valids_5_T_6 = _valids_5_T_3; // @[util.scala:481:{29,69}]
wire _valids_6_T_6 = _valids_6_T_3; // @[util.scala:481:{29,69}]
wire _valids_7_T_6 = _valids_7_T_3; // @[util.scala:481:{29,69}]
wire _valids_8_T_6 = _valids_8_T_3; // @[util.scala:481:{29,69}]
wire _valids_9_T_6 = _valids_9_T_3; // @[util.scala:481:{29,69}]
wire _valids_10_T_6 = _valids_10_T_3; // @[util.scala:481:{29,69}]
wire _valids_11_T_6 = _valids_11_T_3; // @[util.scala:481:{29,69}]
wire _valids_12_T_6 = _valids_12_T_3; // @[util.scala:481:{29,69}]
wire _valids_13_T_6 = _valids_13_T_3; // @[util.scala:481:{29,69}]
wire _valids_14_T_6 = _valids_14_T_3; // @[util.scala:481:{29,69}]
wire _valids_15_T_6 = _valids_15_T_3; // @[util.scala:481:{29,69}]
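  // Wrapping increment logic for the enqueue and dequeue pointers
  // (generated from Counter.scala).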
wire wrap = &enq_ptr_value; // @[Counter.scala:61:40, :73:24]
wire [4:0] _GEN_2 = {1'h0, enq_ptr_value}; // @[Counter.scala:61:40, :77:24]
wire [4:0] _value_T = _GEN_2 + 5'h1; // @[Counter.scala:77:24]
wire [3:0] _value_T_1 = _value_T[3:0]; // @[Counter.scala:77:24]
wire wrap_1 = &deq_ptr_value; // @[Counter.scala:61:40, :73:24]
wire [4:0] _GEN_3 = {1'h0, deq_ptr_value}; // @[Counter.scala:61:40, :77:24]
wire [4:0] _value_T_2 = _GEN_3 + 5'h1; // @[Counter.scala:77:24]
wire [3:0] _value_T_3 = _value_T_2[3:0]; // @[Counter.scala:77:24]
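  // The queue accepts a new entry whenever it is not full; the dequeue payload
  // outputs below are forwarded from the out_uop_* selection signals.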
assign _io_enq_ready_T = ~full; // @[util.scala:474:24, :504:19]
assign io_enq_ready_0 = _io_enq_ready_T; // @[util.scala:448:7, :504:19]
assign io_deq_bits_uop_uopc_0 = out_uop_uopc; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_inst_0 = out_uop_inst; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_debug_inst_0 = out_uop_debug_inst; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_is_rvc_0 = out_uop_is_rvc; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_debug_pc_0 = out_uop_debug_pc; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_iq_type_0 = out_uop_iq_type; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_fu_code_0 = out_uop_fu_code; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_ctrl_br_type_0 = out_uop_ctrl_br_type; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_ctrl_op1_sel_0 = out_uop_ctrl_op1_sel; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_ctrl_op2_sel_0 = out_uop_ctrl_op2_sel; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_ctrl_imm_sel_0 = out_uop_ctrl_imm_sel; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_ctrl_op_fcn_0 = out_uop_ctrl_op_fcn; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_ctrl_fcn_dw_0 = out_uop_ctrl_fcn_dw; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_ctrl_csr_cmd_0 = out_uop_ctrl_csr_cmd; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_ctrl_is_load_0 = out_uop_ctrl_is_load; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_ctrl_is_sta_0 = out_uop_ctrl_is_sta; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_ctrl_is_std_0 = out_uop_ctrl_is_std; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_iw_state_0 = out_uop_iw_state; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_iw_p1_poisoned_0 = out_uop_iw_p1_poisoned; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_iw_p2_poisoned_0 = out_uop_iw_p2_poisoned; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_is_br_0 = out_uop_is_br; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_is_jalr_0 = out_uop_is_jalr; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_is_jal_0 = out_uop_is_jal; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_is_sfb_0 = out_uop_is_sfb; // @[util.scala:448:7, :506:17]
assign _io_deq_bits_uop_br_mask_T_1 = out_uop_br_mask; // @[util.scala:85:25, :506:17]
assign io_deq_bits_uop_br_tag_0 = out_uop_br_tag; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_ftq_idx_0 = out_uop_ftq_idx; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_edge_inst_0 = out_uop_edge_inst; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_pc_lob_0 = out_uop_pc_lob; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_taken_0 = out_uop_taken; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_imm_packed_0 = out_uop_imm_packed; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_csr_addr_0 = out_uop_csr_addr; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_rob_idx_0 = out_uop_rob_idx; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_ldq_idx_0 = out_uop_ldq_idx; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_stq_idx_0 = out_uop_stq_idx; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_rxq_idx_0 = out_uop_rxq_idx; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_pdst_0 = out_uop_pdst; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_prs1_0 = out_uop_prs1; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_prs2_0 = out_uop_prs2; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_prs3_0 = out_uop_prs3; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_ppred_0 = out_uop_ppred; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_prs1_busy_0 = out_uop_prs1_busy; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_prs2_busy_0 = out_uop_prs2_busy; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_prs3_busy_0 = out_uop_prs3_busy; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_ppred_busy_0 = out_uop_ppred_busy; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_stale_pdst_0 = out_uop_stale_pdst; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_exception_0 = out_uop_exception; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_exc_cause_0 = out_uop_exc_cause; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_bypassable_0 = out_uop_bypassable; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_mem_cmd_0 = out_uop_mem_cmd; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_mem_size_0 = out_uop_mem_size; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_mem_signed_0 = out_uop_mem_signed; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_is_fence_0 = out_uop_is_fence; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_is_fencei_0 = out_uop_is_fencei; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_is_amo_0 = out_uop_is_amo; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_uses_ldq_0 = out_uop_uses_ldq; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_uses_stq_0 = out_uop_uses_stq; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_is_sys_pc2epc_0 = out_uop_is_sys_pc2epc; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_is_unique_0 = out_uop_is_unique; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_flush_on_commit_0 = out_uop_flush_on_commit; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_ldst_is_rs1_0 = out_uop_ldst_is_rs1; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_ldst_0 = out_uop_ldst; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_lrs1_0 = out_uop_lrs1; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_lrs2_0 = out_uop_lrs2; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_lrs3_0 = out_uop_lrs3; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_ldst_val_0 = out_uop_ldst_val; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_dst_rtype_0 = out_uop_dst_rtype; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_lrs1_rtype_0 = out_uop_lrs1_rtype; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_lrs2_rtype_0 = out_uop_lrs2_rtype; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_frs3_en_0 = out_uop_frs3_en; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_fp_val_0 = out_uop_fp_val; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_fp_single_0 = out_uop_fp_single; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_xcpt_pf_if_0 = out_uop_xcpt_pf_if; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_xcpt_ae_if_0 = out_uop_xcpt_ae_if; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_xcpt_ma_if_0 = out_uop_xcpt_ma_if; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_bp_debug_if_0 = out_uop_bp_debug_if; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_bp_xcpt_if_0 = out_uop_bp_xcpt_if; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_debug_fsrc_0 = out_uop_debug_fsrc; // @[util.scala:448:7, :506:17]
assign io_deq_bits_uop_debug_tsrc_0 = out_uop_debug_tsrc; // @[util.scala:448:7, :506:17]
assign io_deq_bits_addr_0 = out_addr; // @[util.scala:448:7, :506:17]
assign io_deq_bits_data_0 = out_data; // @[util.scala:448:7, :506:17]
assign io_deq_bits_is_hella_0 = out_is_hella; // @[util.scala:448:7, :506:17]
assign io_deq_bits_tag_match_0 = out_tag_match; // @[util.scala:448:7, :506:17]
assign io_deq_bits_old_meta_coh_state_0 = out_old_meta_coh_state; // @[util.scala:448:7, :506:17]
assign io_deq_bits_old_meta_tag_0 = out_old_meta_tag; // @[util.scala:448:7, :506:17]
assign io_deq_bits_way_en = out_way_en; // @[util.scala:448:7, :506:17]
assign io_deq_bits_sdq_id_0 = out_sdq_id; // @[util.scala:448:7, :506:17]
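  // Each _GEN_* vector below packs one uop field from all 16 entries so that
  // indexing it with deq_ptr_value implements a 16:1 read mux selecting the
  // head entry's value for that field.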
wire [15:0][6:0] _GEN_4 = {{uops_15_uopc}, {uops_14_uopc}, {uops_13_uopc}, {uops_12_uopc}, {uops_11_uopc}, {uops_10_uopc}, {uops_9_uopc}, {uops_8_uopc}, {uops_7_uopc}, {uops_6_uopc}, {uops_5_uopc}, {uops_4_uopc}, {uops_3_uopc}, {uops_2_uopc}, {uops_1_uopc}, {uops_0_uopc}}; // @[util.scala:466:20, :508:19]
assign out_uop_uopc = _GEN_4[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][31:0] _GEN_5 = {{uops_15_inst}, {uops_14_inst}, {uops_13_inst}, {uops_12_inst}, {uops_11_inst}, {uops_10_inst}, {uops_9_inst}, {uops_8_inst}, {uops_7_inst}, {uops_6_inst}, {uops_5_inst}, {uops_4_inst}, {uops_3_inst}, {uops_2_inst}, {uops_1_inst}, {uops_0_inst}}; // @[util.scala:466:20, :508:19]
assign out_uop_inst = _GEN_5[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][31:0] _GEN_6 = {{uops_15_debug_inst}, {uops_14_debug_inst}, {uops_13_debug_inst}, {uops_12_debug_inst}, {uops_11_debug_inst}, {uops_10_debug_inst}, {uops_9_debug_inst}, {uops_8_debug_inst}, {uops_7_debug_inst}, {uops_6_debug_inst}, {uops_5_debug_inst}, {uops_4_debug_inst}, {uops_3_debug_inst}, {uops_2_debug_inst}, {uops_1_debug_inst}, {uops_0_debug_inst}}; // @[util.scala:466:20, :508:19]
assign out_uop_debug_inst = _GEN_6[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_7 = {{uops_15_is_rvc}, {uops_14_is_rvc}, {uops_13_is_rvc}, {uops_12_is_rvc}, {uops_11_is_rvc}, {uops_10_is_rvc}, {uops_9_is_rvc}, {uops_8_is_rvc}, {uops_7_is_rvc}, {uops_6_is_rvc}, {uops_5_is_rvc}, {uops_4_is_rvc}, {uops_3_is_rvc}, {uops_2_is_rvc}, {uops_1_is_rvc}, {uops_0_is_rvc}}; // @[util.scala:466:20, :508:19]
assign out_uop_is_rvc = _GEN_7[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][33:0] _GEN_8 = {{uops_15_debug_pc}, {uops_14_debug_pc}, {uops_13_debug_pc}, {uops_12_debug_pc}, {uops_11_debug_pc}, {uops_10_debug_pc}, {uops_9_debug_pc}, {uops_8_debug_pc}, {uops_7_debug_pc}, {uops_6_debug_pc}, {uops_5_debug_pc}, {uops_4_debug_pc}, {uops_3_debug_pc}, {uops_2_debug_pc}, {uops_1_debug_pc}, {uops_0_debug_pc}}; // @[util.scala:466:20, :508:19]
assign out_uop_debug_pc = _GEN_8[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][2:0] _GEN_9 = {{uops_15_iq_type}, {uops_14_iq_type}, {uops_13_iq_type}, {uops_12_iq_type}, {uops_11_iq_type}, {uops_10_iq_type}, {uops_9_iq_type}, {uops_8_iq_type}, {uops_7_iq_type}, {uops_6_iq_type}, {uops_5_iq_type}, {uops_4_iq_type}, {uops_3_iq_type}, {uops_2_iq_type}, {uops_1_iq_type}, {uops_0_iq_type}}; // @[util.scala:466:20, :508:19]
assign out_uop_iq_type = _GEN_9[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][9:0] _GEN_10 = {{uops_15_fu_code}, {uops_14_fu_code}, {uops_13_fu_code}, {uops_12_fu_code}, {uops_11_fu_code}, {uops_10_fu_code}, {uops_9_fu_code}, {uops_8_fu_code}, {uops_7_fu_code}, {uops_6_fu_code}, {uops_5_fu_code}, {uops_4_fu_code}, {uops_3_fu_code}, {uops_2_fu_code}, {uops_1_fu_code}, {uops_0_fu_code}}; // @[util.scala:466:20, :508:19]
assign out_uop_fu_code = _GEN_10[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_11 = {{uops_15_ctrl_br_type}, {uops_14_ctrl_br_type}, {uops_13_ctrl_br_type}, {uops_12_ctrl_br_type}, {uops_11_ctrl_br_type}, {uops_10_ctrl_br_type}, {uops_9_ctrl_br_type}, {uops_8_ctrl_br_type}, {uops_7_ctrl_br_type}, {uops_6_ctrl_br_type}, {uops_5_ctrl_br_type}, {uops_4_ctrl_br_type}, {uops_3_ctrl_br_type}, {uops_2_ctrl_br_type}, {uops_1_ctrl_br_type}, {uops_0_ctrl_br_type}}; // @[util.scala:466:20, :508:19]
assign out_uop_ctrl_br_type = _GEN_11[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_12 = {{uops_15_ctrl_op1_sel}, {uops_14_ctrl_op1_sel}, {uops_13_ctrl_op1_sel}, {uops_12_ctrl_op1_sel}, {uops_11_ctrl_op1_sel}, {uops_10_ctrl_op1_sel}, {uops_9_ctrl_op1_sel}, {uops_8_ctrl_op1_sel}, {uops_7_ctrl_op1_sel}, {uops_6_ctrl_op1_sel}, {uops_5_ctrl_op1_sel}, {uops_4_ctrl_op1_sel}, {uops_3_ctrl_op1_sel}, {uops_2_ctrl_op1_sel}, {uops_1_ctrl_op1_sel}, {uops_0_ctrl_op1_sel}}; // @[util.scala:466:20, :508:19]
assign out_uop_ctrl_op1_sel = _GEN_12[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][2:0] _GEN_13 = {{uops_15_ctrl_op2_sel}, {uops_14_ctrl_op2_sel}, {uops_13_ctrl_op2_sel}, {uops_12_ctrl_op2_sel}, {uops_11_ctrl_op2_sel}, {uops_10_ctrl_op2_sel}, {uops_9_ctrl_op2_sel}, {uops_8_ctrl_op2_sel}, {uops_7_ctrl_op2_sel}, {uops_6_ctrl_op2_sel}, {uops_5_ctrl_op2_sel}, {uops_4_ctrl_op2_sel}, {uops_3_ctrl_op2_sel}, {uops_2_ctrl_op2_sel}, {uops_1_ctrl_op2_sel}, {uops_0_ctrl_op2_sel}}; // @[util.scala:466:20, :508:19]
assign out_uop_ctrl_op2_sel = _GEN_13[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][2:0] _GEN_14 = {{uops_15_ctrl_imm_sel}, {uops_14_ctrl_imm_sel}, {uops_13_ctrl_imm_sel}, {uops_12_ctrl_imm_sel}, {uops_11_ctrl_imm_sel}, {uops_10_ctrl_imm_sel}, {uops_9_ctrl_imm_sel}, {uops_8_ctrl_imm_sel}, {uops_7_ctrl_imm_sel}, {uops_6_ctrl_imm_sel}, {uops_5_ctrl_imm_sel}, {uops_4_ctrl_imm_sel}, {uops_3_ctrl_imm_sel}, {uops_2_ctrl_imm_sel}, {uops_1_ctrl_imm_sel}, {uops_0_ctrl_imm_sel}}; // @[util.scala:466:20, :508:19]
assign out_uop_ctrl_imm_sel = _GEN_14[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][4:0] _GEN_15 = {{uops_15_ctrl_op_fcn}, {uops_14_ctrl_op_fcn}, {uops_13_ctrl_op_fcn}, {uops_12_ctrl_op_fcn}, {uops_11_ctrl_op_fcn}, {uops_10_ctrl_op_fcn}, {uops_9_ctrl_op_fcn}, {uops_8_ctrl_op_fcn}, {uops_7_ctrl_op_fcn}, {uops_6_ctrl_op_fcn}, {uops_5_ctrl_op_fcn}, {uops_4_ctrl_op_fcn}, {uops_3_ctrl_op_fcn}, {uops_2_ctrl_op_fcn}, {uops_1_ctrl_op_fcn}, {uops_0_ctrl_op_fcn}}; // @[util.scala:466:20, :508:19]
assign out_uop_ctrl_op_fcn = _GEN_15[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_16 = {{uops_15_ctrl_fcn_dw}, {uops_14_ctrl_fcn_dw}, {uops_13_ctrl_fcn_dw}, {uops_12_ctrl_fcn_dw}, {uops_11_ctrl_fcn_dw}, {uops_10_ctrl_fcn_dw}, {uops_9_ctrl_fcn_dw}, {uops_8_ctrl_fcn_dw}, {uops_7_ctrl_fcn_dw}, {uops_6_ctrl_fcn_dw}, {uops_5_ctrl_fcn_dw}, {uops_4_ctrl_fcn_dw}, {uops_3_ctrl_fcn_dw}, {uops_2_ctrl_fcn_dw}, {uops_1_ctrl_fcn_dw}, {uops_0_ctrl_fcn_dw}}; // @[util.scala:466:20, :508:19]
assign out_uop_ctrl_fcn_dw = _GEN_16[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][2:0] _GEN_17 = {{uops_15_ctrl_csr_cmd}, {uops_14_ctrl_csr_cmd}, {uops_13_ctrl_csr_cmd}, {uops_12_ctrl_csr_cmd}, {uops_11_ctrl_csr_cmd}, {uops_10_ctrl_csr_cmd}, {uops_9_ctrl_csr_cmd}, {uops_8_ctrl_csr_cmd}, {uops_7_ctrl_csr_cmd}, {uops_6_ctrl_csr_cmd}, {uops_5_ctrl_csr_cmd}, {uops_4_ctrl_csr_cmd}, {uops_3_ctrl_csr_cmd}, {uops_2_ctrl_csr_cmd}, {uops_1_ctrl_csr_cmd}, {uops_0_ctrl_csr_cmd}}; // @[util.scala:466:20, :508:19]
assign out_uop_ctrl_csr_cmd = _GEN_17[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_18 = {{uops_15_ctrl_is_load}, {uops_14_ctrl_is_load}, {uops_13_ctrl_is_load}, {uops_12_ctrl_is_load}, {uops_11_ctrl_is_load}, {uops_10_ctrl_is_load}, {uops_9_ctrl_is_load}, {uops_8_ctrl_is_load}, {uops_7_ctrl_is_load}, {uops_6_ctrl_is_load}, {uops_5_ctrl_is_load}, {uops_4_ctrl_is_load}, {uops_3_ctrl_is_load}, {uops_2_ctrl_is_load}, {uops_1_ctrl_is_load}, {uops_0_ctrl_is_load}}; // @[util.scala:466:20, :508:19]
assign out_uop_ctrl_is_load = _GEN_18[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_19 = {{uops_15_ctrl_is_sta}, {uops_14_ctrl_is_sta}, {uops_13_ctrl_is_sta}, {uops_12_ctrl_is_sta}, {uops_11_ctrl_is_sta}, {uops_10_ctrl_is_sta}, {uops_9_ctrl_is_sta}, {uops_8_ctrl_is_sta}, {uops_7_ctrl_is_sta}, {uops_6_ctrl_is_sta}, {uops_5_ctrl_is_sta}, {uops_4_ctrl_is_sta}, {uops_3_ctrl_is_sta}, {uops_2_ctrl_is_sta}, {uops_1_ctrl_is_sta}, {uops_0_ctrl_is_sta}}; // @[util.scala:466:20, :508:19]
assign out_uop_ctrl_is_sta = _GEN_19[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_20 = {{uops_15_ctrl_is_std}, {uops_14_ctrl_is_std}, {uops_13_ctrl_is_std}, {uops_12_ctrl_is_std}, {uops_11_ctrl_is_std}, {uops_10_ctrl_is_std}, {uops_9_ctrl_is_std}, {uops_8_ctrl_is_std}, {uops_7_ctrl_is_std}, {uops_6_ctrl_is_std}, {uops_5_ctrl_is_std}, {uops_4_ctrl_is_std}, {uops_3_ctrl_is_std}, {uops_2_ctrl_is_std}, {uops_1_ctrl_is_std}, {uops_0_ctrl_is_std}}; // @[util.scala:466:20, :508:19]
assign out_uop_ctrl_is_std = _GEN_20[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_21 = {{uops_15_iw_state}, {uops_14_iw_state}, {uops_13_iw_state}, {uops_12_iw_state}, {uops_11_iw_state}, {uops_10_iw_state}, {uops_9_iw_state}, {uops_8_iw_state}, {uops_7_iw_state}, {uops_6_iw_state}, {uops_5_iw_state}, {uops_4_iw_state}, {uops_3_iw_state}, {uops_2_iw_state}, {uops_1_iw_state}, {uops_0_iw_state}}; // @[util.scala:466:20, :508:19]
assign out_uop_iw_state = _GEN_21[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_22 = {{uops_15_iw_p1_poisoned}, {uops_14_iw_p1_poisoned}, {uops_13_iw_p1_poisoned}, {uops_12_iw_p1_poisoned}, {uops_11_iw_p1_poisoned}, {uops_10_iw_p1_poisoned}, {uops_9_iw_p1_poisoned}, {uops_8_iw_p1_poisoned}, {uops_7_iw_p1_poisoned}, {uops_6_iw_p1_poisoned}, {uops_5_iw_p1_poisoned}, {uops_4_iw_p1_poisoned}, {uops_3_iw_p1_poisoned}, {uops_2_iw_p1_poisoned}, {uops_1_iw_p1_poisoned}, {uops_0_iw_p1_poisoned}}; // @[util.scala:466:20, :508:19]
assign out_uop_iw_p1_poisoned = _GEN_22[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_23 = {{uops_15_iw_p2_poisoned}, {uops_14_iw_p2_poisoned}, {uops_13_iw_p2_poisoned}, {uops_12_iw_p2_poisoned}, {uops_11_iw_p2_poisoned}, {uops_10_iw_p2_poisoned}, {uops_9_iw_p2_poisoned}, {uops_8_iw_p2_poisoned}, {uops_7_iw_p2_poisoned}, {uops_6_iw_p2_poisoned}, {uops_5_iw_p2_poisoned}, {uops_4_iw_p2_poisoned}, {uops_3_iw_p2_poisoned}, {uops_2_iw_p2_poisoned}, {uops_1_iw_p2_poisoned}, {uops_0_iw_p2_poisoned}}; // @[util.scala:466:20, :508:19]
assign out_uop_iw_p2_poisoned = _GEN_23[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_24 = {{uops_15_is_br}, {uops_14_is_br}, {uops_13_is_br}, {uops_12_is_br}, {uops_11_is_br}, {uops_10_is_br}, {uops_9_is_br}, {uops_8_is_br}, {uops_7_is_br}, {uops_6_is_br}, {uops_5_is_br}, {uops_4_is_br}, {uops_3_is_br}, {uops_2_is_br}, {uops_1_is_br}, {uops_0_is_br}}; // @[util.scala:466:20, :508:19]
assign out_uop_is_br = _GEN_24[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_25 = {{uops_15_is_jalr}, {uops_14_is_jalr}, {uops_13_is_jalr}, {uops_12_is_jalr}, {uops_11_is_jalr}, {uops_10_is_jalr}, {uops_9_is_jalr}, {uops_8_is_jalr}, {uops_7_is_jalr}, {uops_6_is_jalr}, {uops_5_is_jalr}, {uops_4_is_jalr}, {uops_3_is_jalr}, {uops_2_is_jalr}, {uops_1_is_jalr}, {uops_0_is_jalr}}; // @[util.scala:466:20, :508:19]
assign out_uop_is_jalr = _GEN_25[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_26 = {{uops_15_is_jal}, {uops_14_is_jal}, {uops_13_is_jal}, {uops_12_is_jal}, {uops_11_is_jal}, {uops_10_is_jal}, {uops_9_is_jal}, {uops_8_is_jal}, {uops_7_is_jal}, {uops_6_is_jal}, {uops_5_is_jal}, {uops_4_is_jal}, {uops_3_is_jal}, {uops_2_is_jal}, {uops_1_is_jal}, {uops_0_is_jal}}; // @[util.scala:466:20, :508:19]
assign out_uop_is_jal = _GEN_26[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_27 = {{uops_15_is_sfb}, {uops_14_is_sfb}, {uops_13_is_sfb}, {uops_12_is_sfb}, {uops_11_is_sfb}, {uops_10_is_sfb}, {uops_9_is_sfb}, {uops_8_is_sfb}, {uops_7_is_sfb}, {uops_6_is_sfb}, {uops_5_is_sfb}, {uops_4_is_sfb}, {uops_3_is_sfb}, {uops_2_is_sfb}, {uops_1_is_sfb}, {uops_0_is_sfb}}; // @[util.scala:466:20, :508:19]
assign out_uop_is_sfb = _GEN_27[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_28 = {{uops_15_br_mask}, {uops_14_br_mask}, {uops_13_br_mask}, {uops_12_br_mask}, {uops_11_br_mask}, {uops_10_br_mask}, {uops_9_br_mask}, {uops_8_br_mask}, {uops_7_br_mask}, {uops_6_br_mask}, {uops_5_br_mask}, {uops_4_br_mask}, {uops_3_br_mask}, {uops_2_br_mask}, {uops_1_br_mask}, {uops_0_br_mask}}; // @[util.scala:466:20, :508:19]
assign out_uop_br_mask = _GEN_28[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_29 = {{uops_15_br_tag}, {uops_14_br_tag}, {uops_13_br_tag}, {uops_12_br_tag}, {uops_11_br_tag}, {uops_10_br_tag}, {uops_9_br_tag}, {uops_8_br_tag}, {uops_7_br_tag}, {uops_6_br_tag}, {uops_5_br_tag}, {uops_4_br_tag}, {uops_3_br_tag}, {uops_2_br_tag}, {uops_1_br_tag}, {uops_0_br_tag}}; // @[util.scala:466:20, :508:19]
assign out_uop_br_tag = _GEN_29[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_30 = {{uops_15_ftq_idx}, {uops_14_ftq_idx}, {uops_13_ftq_idx}, {uops_12_ftq_idx}, {uops_11_ftq_idx}, {uops_10_ftq_idx}, {uops_9_ftq_idx}, {uops_8_ftq_idx}, {uops_7_ftq_idx}, {uops_6_ftq_idx}, {uops_5_ftq_idx}, {uops_4_ftq_idx}, {uops_3_ftq_idx}, {uops_2_ftq_idx}, {uops_1_ftq_idx}, {uops_0_ftq_idx}}; // @[util.scala:466:20, :508:19]
assign out_uop_ftq_idx = _GEN_30[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_31 = {{uops_15_edge_inst}, {uops_14_edge_inst}, {uops_13_edge_inst}, {uops_12_edge_inst}, {uops_11_edge_inst}, {uops_10_edge_inst}, {uops_9_edge_inst}, {uops_8_edge_inst}, {uops_7_edge_inst}, {uops_6_edge_inst}, {uops_5_edge_inst}, {uops_4_edge_inst}, {uops_3_edge_inst}, {uops_2_edge_inst}, {uops_1_edge_inst}, {uops_0_edge_inst}}; // @[util.scala:466:20, :508:19]
assign out_uop_edge_inst = _GEN_31[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_32 = {{uops_15_pc_lob}, {uops_14_pc_lob}, {uops_13_pc_lob}, {uops_12_pc_lob}, {uops_11_pc_lob}, {uops_10_pc_lob}, {uops_9_pc_lob}, {uops_8_pc_lob}, {uops_7_pc_lob}, {uops_6_pc_lob}, {uops_5_pc_lob}, {uops_4_pc_lob}, {uops_3_pc_lob}, {uops_2_pc_lob}, {uops_1_pc_lob}, {uops_0_pc_lob}}; // @[util.scala:466:20, :508:19]
assign out_uop_pc_lob = _GEN_32[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_33 = {{uops_15_taken}, {uops_14_taken}, {uops_13_taken}, {uops_12_taken}, {uops_11_taken}, {uops_10_taken}, {uops_9_taken}, {uops_8_taken}, {uops_7_taken}, {uops_6_taken}, {uops_5_taken}, {uops_4_taken}, {uops_3_taken}, {uops_2_taken}, {uops_1_taken}, {uops_0_taken}}; // @[util.scala:466:20, :508:19]
assign out_uop_taken = _GEN_33[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][19:0] _GEN_34 = {{uops_15_imm_packed}, {uops_14_imm_packed}, {uops_13_imm_packed}, {uops_12_imm_packed}, {uops_11_imm_packed}, {uops_10_imm_packed}, {uops_9_imm_packed}, {uops_8_imm_packed}, {uops_7_imm_packed}, {uops_6_imm_packed}, {uops_5_imm_packed}, {uops_4_imm_packed}, {uops_3_imm_packed}, {uops_2_imm_packed}, {uops_1_imm_packed}, {uops_0_imm_packed}}; // @[util.scala:466:20, :508:19]
assign out_uop_imm_packed = _GEN_34[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][11:0] _GEN_35 = {{uops_15_csr_addr}, {uops_14_csr_addr}, {uops_13_csr_addr}, {uops_12_csr_addr}, {uops_11_csr_addr}, {uops_10_csr_addr}, {uops_9_csr_addr}, {uops_8_csr_addr}, {uops_7_csr_addr}, {uops_6_csr_addr}, {uops_5_csr_addr}, {uops_4_csr_addr}, {uops_3_csr_addr}, {uops_2_csr_addr}, {uops_1_csr_addr}, {uops_0_csr_addr}}; // @[util.scala:466:20, :508:19]
assign out_uop_csr_addr = _GEN_35[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_36 = {{uops_15_rob_idx}, {uops_14_rob_idx}, {uops_13_rob_idx}, {uops_12_rob_idx}, {uops_11_rob_idx}, {uops_10_rob_idx}, {uops_9_rob_idx}, {uops_8_rob_idx}, {uops_7_rob_idx}, {uops_6_rob_idx}, {uops_5_rob_idx}, {uops_4_rob_idx}, {uops_3_rob_idx}, {uops_2_rob_idx}, {uops_1_rob_idx}, {uops_0_rob_idx}}; // @[util.scala:466:20, :508:19]
assign out_uop_rob_idx = _GEN_36[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_37 = {{uops_15_ldq_idx}, {uops_14_ldq_idx}, {uops_13_ldq_idx}, {uops_12_ldq_idx}, {uops_11_ldq_idx}, {uops_10_ldq_idx}, {uops_9_ldq_idx}, {uops_8_ldq_idx}, {uops_7_ldq_idx}, {uops_6_ldq_idx}, {uops_5_ldq_idx}, {uops_4_ldq_idx}, {uops_3_ldq_idx}, {uops_2_ldq_idx}, {uops_1_ldq_idx}, {uops_0_ldq_idx}}; // @[util.scala:466:20, :508:19]
assign out_uop_ldq_idx = _GEN_37[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_38 = {{uops_15_stq_idx}, {uops_14_stq_idx}, {uops_13_stq_idx}, {uops_12_stq_idx}, {uops_11_stq_idx}, {uops_10_stq_idx}, {uops_9_stq_idx}, {uops_8_stq_idx}, {uops_7_stq_idx}, {uops_6_stq_idx}, {uops_5_stq_idx}, {uops_4_stq_idx}, {uops_3_stq_idx}, {uops_2_stq_idx}, {uops_1_stq_idx}, {uops_0_stq_idx}}; // @[util.scala:466:20, :508:19]
assign out_uop_stq_idx = _GEN_38[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_39 = {{uops_15_rxq_idx}, {uops_14_rxq_idx}, {uops_13_rxq_idx}, {uops_12_rxq_idx}, {uops_11_rxq_idx}, {uops_10_rxq_idx}, {uops_9_rxq_idx}, {uops_8_rxq_idx}, {uops_7_rxq_idx}, {uops_6_rxq_idx}, {uops_5_rxq_idx}, {uops_4_rxq_idx}, {uops_3_rxq_idx}, {uops_2_rxq_idx}, {uops_1_rxq_idx}, {uops_0_rxq_idx}}; // @[util.scala:466:20, :508:19]
assign out_uop_rxq_idx = _GEN_39[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][6:0] _GEN_40 = {{uops_15_pdst}, {uops_14_pdst}, {uops_13_pdst}, {uops_12_pdst}, {uops_11_pdst}, {uops_10_pdst}, {uops_9_pdst}, {uops_8_pdst}, {uops_7_pdst}, {uops_6_pdst}, {uops_5_pdst}, {uops_4_pdst}, {uops_3_pdst}, {uops_2_pdst}, {uops_1_pdst}, {uops_0_pdst}}; // @[util.scala:466:20, :508:19]
assign out_uop_pdst = _GEN_40[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][6:0] _GEN_41 = {{uops_15_prs1}, {uops_14_prs1}, {uops_13_prs1}, {uops_12_prs1}, {uops_11_prs1}, {uops_10_prs1}, {uops_9_prs1}, {uops_8_prs1}, {uops_7_prs1}, {uops_6_prs1}, {uops_5_prs1}, {uops_4_prs1}, {uops_3_prs1}, {uops_2_prs1}, {uops_1_prs1}, {uops_0_prs1}}; // @[util.scala:466:20, :508:19]
assign out_uop_prs1 = _GEN_41[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][6:0] _GEN_42 = {{uops_15_prs2}, {uops_14_prs2}, {uops_13_prs2}, {uops_12_prs2}, {uops_11_prs2}, {uops_10_prs2}, {uops_9_prs2}, {uops_8_prs2}, {uops_7_prs2}, {uops_6_prs2}, {uops_5_prs2}, {uops_4_prs2}, {uops_3_prs2}, {uops_2_prs2}, {uops_1_prs2}, {uops_0_prs2}}; // @[util.scala:466:20, :508:19]
assign out_uop_prs2 = _GEN_42[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][6:0] _GEN_43 = {{uops_15_prs3}, {uops_14_prs3}, {uops_13_prs3}, {uops_12_prs3}, {uops_11_prs3}, {uops_10_prs3}, {uops_9_prs3}, {uops_8_prs3}, {uops_7_prs3}, {uops_6_prs3}, {uops_5_prs3}, {uops_4_prs3}, {uops_3_prs3}, {uops_2_prs3}, {uops_1_prs3}, {uops_0_prs3}}; // @[util.scala:466:20, :508:19]
assign out_uop_prs3 = _GEN_43[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][3:0] _GEN_44 = {{uops_15_ppred}, {uops_14_ppred}, {uops_13_ppred}, {uops_12_ppred}, {uops_11_ppred}, {uops_10_ppred}, {uops_9_ppred}, {uops_8_ppred}, {uops_7_ppred}, {uops_6_ppred}, {uops_5_ppred}, {uops_4_ppred}, {uops_3_ppred}, {uops_2_ppred}, {uops_1_ppred}, {uops_0_ppred}}; // @[util.scala:466:20, :508:19]
assign out_uop_ppred = _GEN_44[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_45 = {{uops_15_prs1_busy}, {uops_14_prs1_busy}, {uops_13_prs1_busy}, {uops_12_prs1_busy}, {uops_11_prs1_busy}, {uops_10_prs1_busy}, {uops_9_prs1_busy}, {uops_8_prs1_busy}, {uops_7_prs1_busy}, {uops_6_prs1_busy}, {uops_5_prs1_busy}, {uops_4_prs1_busy}, {uops_3_prs1_busy}, {uops_2_prs1_busy}, {uops_1_prs1_busy}, {uops_0_prs1_busy}}; // @[util.scala:466:20, :508:19]
assign out_uop_prs1_busy = _GEN_45[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_46 = {{uops_15_prs2_busy}, {uops_14_prs2_busy}, {uops_13_prs2_busy}, {uops_12_prs2_busy}, {uops_11_prs2_busy}, {uops_10_prs2_busy}, {uops_9_prs2_busy}, {uops_8_prs2_busy}, {uops_7_prs2_busy}, {uops_6_prs2_busy}, {uops_5_prs2_busy}, {uops_4_prs2_busy}, {uops_3_prs2_busy}, {uops_2_prs2_busy}, {uops_1_prs2_busy}, {uops_0_prs2_busy}}; // @[util.scala:466:20, :508:19]
assign out_uop_prs2_busy = _GEN_46[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_47 = {{uops_15_prs3_busy}, {uops_14_prs3_busy}, {uops_13_prs3_busy}, {uops_12_prs3_busy}, {uops_11_prs3_busy}, {uops_10_prs3_busy}, {uops_9_prs3_busy}, {uops_8_prs3_busy}, {uops_7_prs3_busy}, {uops_6_prs3_busy}, {uops_5_prs3_busy}, {uops_4_prs3_busy}, {uops_3_prs3_busy}, {uops_2_prs3_busy}, {uops_1_prs3_busy}, {uops_0_prs3_busy}}; // @[util.scala:466:20, :508:19]
assign out_uop_prs3_busy = _GEN_47[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_48 = {{uops_15_ppred_busy}, {uops_14_ppred_busy}, {uops_13_ppred_busy}, {uops_12_ppred_busy}, {uops_11_ppred_busy}, {uops_10_ppred_busy}, {uops_9_ppred_busy}, {uops_8_ppred_busy}, {uops_7_ppred_busy}, {uops_6_ppred_busy}, {uops_5_ppred_busy}, {uops_4_ppred_busy}, {uops_3_ppred_busy}, {uops_2_ppred_busy}, {uops_1_ppred_busy}, {uops_0_ppred_busy}}; // @[util.scala:466:20, :508:19]
assign out_uop_ppred_busy = _GEN_48[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][6:0] _GEN_49 = {{uops_15_stale_pdst}, {uops_14_stale_pdst}, {uops_13_stale_pdst}, {uops_12_stale_pdst}, {uops_11_stale_pdst}, {uops_10_stale_pdst}, {uops_9_stale_pdst}, {uops_8_stale_pdst}, {uops_7_stale_pdst}, {uops_6_stale_pdst}, {uops_5_stale_pdst}, {uops_4_stale_pdst}, {uops_3_stale_pdst}, {uops_2_stale_pdst}, {uops_1_stale_pdst}, {uops_0_stale_pdst}}; // @[util.scala:466:20, :508:19]
assign out_uop_stale_pdst = _GEN_49[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_50 = {{uops_15_exception}, {uops_14_exception}, {uops_13_exception}, {uops_12_exception}, {uops_11_exception}, {uops_10_exception}, {uops_9_exception}, {uops_8_exception}, {uops_7_exception}, {uops_6_exception}, {uops_5_exception}, {uops_4_exception}, {uops_3_exception}, {uops_2_exception}, {uops_1_exception}, {uops_0_exception}}; // @[util.scala:466:20, :508:19]
assign out_uop_exception = _GEN_50[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][63:0] _GEN_51 = {{uops_15_exc_cause}, {uops_14_exc_cause}, {uops_13_exc_cause}, {uops_12_exc_cause}, {uops_11_exc_cause}, {uops_10_exc_cause}, {uops_9_exc_cause}, {uops_8_exc_cause}, {uops_7_exc_cause}, {uops_6_exc_cause}, {uops_5_exc_cause}, {uops_4_exc_cause}, {uops_3_exc_cause}, {uops_2_exc_cause}, {uops_1_exc_cause}, {uops_0_exc_cause}}; // @[util.scala:466:20, :508:19]
assign out_uop_exc_cause = _GEN_51[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_52 = {{uops_15_bypassable}, {uops_14_bypassable}, {uops_13_bypassable}, {uops_12_bypassable}, {uops_11_bypassable}, {uops_10_bypassable}, {uops_9_bypassable}, {uops_8_bypassable}, {uops_7_bypassable}, {uops_6_bypassable}, {uops_5_bypassable}, {uops_4_bypassable}, {uops_3_bypassable}, {uops_2_bypassable}, {uops_1_bypassable}, {uops_0_bypassable}}; // @[util.scala:466:20, :508:19]
assign out_uop_bypassable = _GEN_52[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][4:0] _GEN_53 = {{uops_15_mem_cmd}, {uops_14_mem_cmd}, {uops_13_mem_cmd}, {uops_12_mem_cmd}, {uops_11_mem_cmd}, {uops_10_mem_cmd}, {uops_9_mem_cmd}, {uops_8_mem_cmd}, {uops_7_mem_cmd}, {uops_6_mem_cmd}, {uops_5_mem_cmd}, {uops_4_mem_cmd}, {uops_3_mem_cmd}, {uops_2_mem_cmd}, {uops_1_mem_cmd}, {uops_0_mem_cmd}}; // @[util.scala:466:20, :508:19]
assign out_uop_mem_cmd = _GEN_53[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_54 = {{uops_15_mem_size}, {uops_14_mem_size}, {uops_13_mem_size}, {uops_12_mem_size}, {uops_11_mem_size}, {uops_10_mem_size}, {uops_9_mem_size}, {uops_8_mem_size}, {uops_7_mem_size}, {uops_6_mem_size}, {uops_5_mem_size}, {uops_4_mem_size}, {uops_3_mem_size}, {uops_2_mem_size}, {uops_1_mem_size}, {uops_0_mem_size}}; // @[util.scala:466:20, :508:19]
assign out_uop_mem_size = _GEN_54[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_55 = {{uops_15_mem_signed}, {uops_14_mem_signed}, {uops_13_mem_signed}, {uops_12_mem_signed}, {uops_11_mem_signed}, {uops_10_mem_signed}, {uops_9_mem_signed}, {uops_8_mem_signed}, {uops_7_mem_signed}, {uops_6_mem_signed}, {uops_5_mem_signed}, {uops_4_mem_signed}, {uops_3_mem_signed}, {uops_2_mem_signed}, {uops_1_mem_signed}, {uops_0_mem_signed}}; // @[util.scala:466:20, :508:19]
assign out_uop_mem_signed = _GEN_55[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_56 = {{uops_15_is_fence}, {uops_14_is_fence}, {uops_13_is_fence}, {uops_12_is_fence}, {uops_11_is_fence}, {uops_10_is_fence}, {uops_9_is_fence}, {uops_8_is_fence}, {uops_7_is_fence}, {uops_6_is_fence}, {uops_5_is_fence}, {uops_4_is_fence}, {uops_3_is_fence}, {uops_2_is_fence}, {uops_1_is_fence}, {uops_0_is_fence}}; // @[util.scala:466:20, :508:19]
assign out_uop_is_fence = _GEN_56[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_57 = {{uops_15_is_fencei}, {uops_14_is_fencei}, {uops_13_is_fencei}, {uops_12_is_fencei}, {uops_11_is_fencei}, {uops_10_is_fencei}, {uops_9_is_fencei}, {uops_8_is_fencei}, {uops_7_is_fencei}, {uops_6_is_fencei}, {uops_5_is_fencei}, {uops_4_is_fencei}, {uops_3_is_fencei}, {uops_2_is_fencei}, {uops_1_is_fencei}, {uops_0_is_fencei}}; // @[util.scala:466:20, :508:19]
assign out_uop_is_fencei = _GEN_57[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_58 = {{uops_15_is_amo}, {uops_14_is_amo}, {uops_13_is_amo}, {uops_12_is_amo}, {uops_11_is_amo}, {uops_10_is_amo}, {uops_9_is_amo}, {uops_8_is_amo}, {uops_7_is_amo}, {uops_6_is_amo}, {uops_5_is_amo}, {uops_4_is_amo}, {uops_3_is_amo}, {uops_2_is_amo}, {uops_1_is_amo}, {uops_0_is_amo}}; // @[util.scala:466:20, :508:19]
assign out_uop_is_amo = _GEN_58[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_59 = {{uops_15_uses_ldq}, {uops_14_uses_ldq}, {uops_13_uses_ldq}, {uops_12_uses_ldq}, {uops_11_uses_ldq}, {uops_10_uses_ldq}, {uops_9_uses_ldq}, {uops_8_uses_ldq}, {uops_7_uses_ldq}, {uops_6_uses_ldq}, {uops_5_uses_ldq}, {uops_4_uses_ldq}, {uops_3_uses_ldq}, {uops_2_uses_ldq}, {uops_1_uses_ldq}, {uops_0_uses_ldq}}; // @[util.scala:466:20, :508:19]
assign out_uop_uses_ldq = _GEN_59[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_60 = {{uops_15_uses_stq}, {uops_14_uses_stq}, {uops_13_uses_stq}, {uops_12_uses_stq}, {uops_11_uses_stq}, {uops_10_uses_stq}, {uops_9_uses_stq}, {uops_8_uses_stq}, {uops_7_uses_stq}, {uops_6_uses_stq}, {uops_5_uses_stq}, {uops_4_uses_stq}, {uops_3_uses_stq}, {uops_2_uses_stq}, {uops_1_uses_stq}, {uops_0_uses_stq}}; // @[util.scala:466:20, :508:19]
assign out_uop_uses_stq = _GEN_60[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_61 = {{uops_15_is_sys_pc2epc}, {uops_14_is_sys_pc2epc}, {uops_13_is_sys_pc2epc}, {uops_12_is_sys_pc2epc}, {uops_11_is_sys_pc2epc}, {uops_10_is_sys_pc2epc}, {uops_9_is_sys_pc2epc}, {uops_8_is_sys_pc2epc}, {uops_7_is_sys_pc2epc}, {uops_6_is_sys_pc2epc}, {uops_5_is_sys_pc2epc}, {uops_4_is_sys_pc2epc}, {uops_3_is_sys_pc2epc}, {uops_2_is_sys_pc2epc}, {uops_1_is_sys_pc2epc}, {uops_0_is_sys_pc2epc}}; // @[util.scala:466:20, :508:19]
assign out_uop_is_sys_pc2epc = _GEN_61[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_62 = {{uops_15_is_unique}, {uops_14_is_unique}, {uops_13_is_unique}, {uops_12_is_unique}, {uops_11_is_unique}, {uops_10_is_unique}, {uops_9_is_unique}, {uops_8_is_unique}, {uops_7_is_unique}, {uops_6_is_unique}, {uops_5_is_unique}, {uops_4_is_unique}, {uops_3_is_unique}, {uops_2_is_unique}, {uops_1_is_unique}, {uops_0_is_unique}}; // @[util.scala:466:20, :508:19]
assign out_uop_is_unique = _GEN_62[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_63 = {{uops_15_flush_on_commit}, {uops_14_flush_on_commit}, {uops_13_flush_on_commit}, {uops_12_flush_on_commit}, {uops_11_flush_on_commit}, {uops_10_flush_on_commit}, {uops_9_flush_on_commit}, {uops_8_flush_on_commit}, {uops_7_flush_on_commit}, {uops_6_flush_on_commit}, {uops_5_flush_on_commit}, {uops_4_flush_on_commit}, {uops_3_flush_on_commit}, {uops_2_flush_on_commit}, {uops_1_flush_on_commit}, {uops_0_flush_on_commit}}; // @[util.scala:466:20, :508:19]
assign out_uop_flush_on_commit = _GEN_63[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_64 = {{uops_15_ldst_is_rs1}, {uops_14_ldst_is_rs1}, {uops_13_ldst_is_rs1}, {uops_12_ldst_is_rs1}, {uops_11_ldst_is_rs1}, {uops_10_ldst_is_rs1}, {uops_9_ldst_is_rs1}, {uops_8_ldst_is_rs1}, {uops_7_ldst_is_rs1}, {uops_6_ldst_is_rs1}, {uops_5_ldst_is_rs1}, {uops_4_ldst_is_rs1}, {uops_3_ldst_is_rs1}, {uops_2_ldst_is_rs1}, {uops_1_ldst_is_rs1}, {uops_0_ldst_is_rs1}}; // @[util.scala:466:20, :508:19]
assign out_uop_ldst_is_rs1 = _GEN_64[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_65 = {{uops_15_ldst}, {uops_14_ldst}, {uops_13_ldst}, {uops_12_ldst}, {uops_11_ldst}, {uops_10_ldst}, {uops_9_ldst}, {uops_8_ldst}, {uops_7_ldst}, {uops_6_ldst}, {uops_5_ldst}, {uops_4_ldst}, {uops_3_ldst}, {uops_2_ldst}, {uops_1_ldst}, {uops_0_ldst}}; // @[util.scala:466:20, :508:19]
assign out_uop_ldst = _GEN_65[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_66 = {{uops_15_lrs1}, {uops_14_lrs1}, {uops_13_lrs1}, {uops_12_lrs1}, {uops_11_lrs1}, {uops_10_lrs1}, {uops_9_lrs1}, {uops_8_lrs1}, {uops_7_lrs1}, {uops_6_lrs1}, {uops_5_lrs1}, {uops_4_lrs1}, {uops_3_lrs1}, {uops_2_lrs1}, {uops_1_lrs1}, {uops_0_lrs1}}; // @[util.scala:466:20, :508:19]
assign out_uop_lrs1 = _GEN_66[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_67 = {{uops_15_lrs2}, {uops_14_lrs2}, {uops_13_lrs2}, {uops_12_lrs2}, {uops_11_lrs2}, {uops_10_lrs2}, {uops_9_lrs2}, {uops_8_lrs2}, {uops_7_lrs2}, {uops_6_lrs2}, {uops_5_lrs2}, {uops_4_lrs2}, {uops_3_lrs2}, {uops_2_lrs2}, {uops_1_lrs2}, {uops_0_lrs2}}; // @[util.scala:466:20, :508:19]
assign out_uop_lrs2 = _GEN_67[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][5:0] _GEN_68 = {{uops_15_lrs3}, {uops_14_lrs3}, {uops_13_lrs3}, {uops_12_lrs3}, {uops_11_lrs3}, {uops_10_lrs3}, {uops_9_lrs3}, {uops_8_lrs3}, {uops_7_lrs3}, {uops_6_lrs3}, {uops_5_lrs3}, {uops_4_lrs3}, {uops_3_lrs3}, {uops_2_lrs3}, {uops_1_lrs3}, {uops_0_lrs3}}; // @[util.scala:466:20, :508:19]
assign out_uop_lrs3 = _GEN_68[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_69 = {{uops_15_ldst_val}, {uops_14_ldst_val}, {uops_13_ldst_val}, {uops_12_ldst_val}, {uops_11_ldst_val}, {uops_10_ldst_val}, {uops_9_ldst_val}, {uops_8_ldst_val}, {uops_7_ldst_val}, {uops_6_ldst_val}, {uops_5_ldst_val}, {uops_4_ldst_val}, {uops_3_ldst_val}, {uops_2_ldst_val}, {uops_1_ldst_val}, {uops_0_ldst_val}}; // @[util.scala:466:20, :508:19]
assign out_uop_ldst_val = _GEN_69[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_70 = {{uops_15_dst_rtype}, {uops_14_dst_rtype}, {uops_13_dst_rtype}, {uops_12_dst_rtype}, {uops_11_dst_rtype}, {uops_10_dst_rtype}, {uops_9_dst_rtype}, {uops_8_dst_rtype}, {uops_7_dst_rtype}, {uops_6_dst_rtype}, {uops_5_dst_rtype}, {uops_4_dst_rtype}, {uops_3_dst_rtype}, {uops_2_dst_rtype}, {uops_1_dst_rtype}, {uops_0_dst_rtype}}; // @[util.scala:466:20, :508:19]
assign out_uop_dst_rtype = _GEN_70[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_71 = {{uops_15_lrs1_rtype}, {uops_14_lrs1_rtype}, {uops_13_lrs1_rtype}, {uops_12_lrs1_rtype}, {uops_11_lrs1_rtype}, {uops_10_lrs1_rtype}, {uops_9_lrs1_rtype}, {uops_8_lrs1_rtype}, {uops_7_lrs1_rtype}, {uops_6_lrs1_rtype}, {uops_5_lrs1_rtype}, {uops_4_lrs1_rtype}, {uops_3_lrs1_rtype}, {uops_2_lrs1_rtype}, {uops_1_lrs1_rtype}, {uops_0_lrs1_rtype}}; // @[util.scala:466:20, :508:19]
assign out_uop_lrs1_rtype = _GEN_71[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_72 = {{uops_15_lrs2_rtype}, {uops_14_lrs2_rtype}, {uops_13_lrs2_rtype}, {uops_12_lrs2_rtype}, {uops_11_lrs2_rtype}, {uops_10_lrs2_rtype}, {uops_9_lrs2_rtype}, {uops_8_lrs2_rtype}, {uops_7_lrs2_rtype}, {uops_6_lrs2_rtype}, {uops_5_lrs2_rtype}, {uops_4_lrs2_rtype}, {uops_3_lrs2_rtype}, {uops_2_lrs2_rtype}, {uops_1_lrs2_rtype}, {uops_0_lrs2_rtype}}; // @[util.scala:466:20, :508:19]
assign out_uop_lrs2_rtype = _GEN_72[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_73 = {{uops_15_frs3_en}, {uops_14_frs3_en}, {uops_13_frs3_en}, {uops_12_frs3_en}, {uops_11_frs3_en}, {uops_10_frs3_en}, {uops_9_frs3_en}, {uops_8_frs3_en}, {uops_7_frs3_en}, {uops_6_frs3_en}, {uops_5_frs3_en}, {uops_4_frs3_en}, {uops_3_frs3_en}, {uops_2_frs3_en}, {uops_1_frs3_en}, {uops_0_frs3_en}}; // @[util.scala:466:20, :508:19]
assign out_uop_frs3_en = _GEN_73[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_74 = {{uops_15_fp_val}, {uops_14_fp_val}, {uops_13_fp_val}, {uops_12_fp_val}, {uops_11_fp_val}, {uops_10_fp_val}, {uops_9_fp_val}, {uops_8_fp_val}, {uops_7_fp_val}, {uops_6_fp_val}, {uops_5_fp_val}, {uops_4_fp_val}, {uops_3_fp_val}, {uops_2_fp_val}, {uops_1_fp_val}, {uops_0_fp_val}}; // @[util.scala:466:20, :508:19]
assign out_uop_fp_val = _GEN_74[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_75 = {{uops_15_fp_single}, {uops_14_fp_single}, {uops_13_fp_single}, {uops_12_fp_single}, {uops_11_fp_single}, {uops_10_fp_single}, {uops_9_fp_single}, {uops_8_fp_single}, {uops_7_fp_single}, {uops_6_fp_single}, {uops_5_fp_single}, {uops_4_fp_single}, {uops_3_fp_single}, {uops_2_fp_single}, {uops_1_fp_single}, {uops_0_fp_single}}; // @[util.scala:466:20, :508:19]
assign out_uop_fp_single = _GEN_75[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_76 = {{uops_15_xcpt_pf_if}, {uops_14_xcpt_pf_if}, {uops_13_xcpt_pf_if}, {uops_12_xcpt_pf_if}, {uops_11_xcpt_pf_if}, {uops_10_xcpt_pf_if}, {uops_9_xcpt_pf_if}, {uops_8_xcpt_pf_if}, {uops_7_xcpt_pf_if}, {uops_6_xcpt_pf_if}, {uops_5_xcpt_pf_if}, {uops_4_xcpt_pf_if}, {uops_3_xcpt_pf_if}, {uops_2_xcpt_pf_if}, {uops_1_xcpt_pf_if}, {uops_0_xcpt_pf_if}}; // @[util.scala:466:20, :508:19]
assign out_uop_xcpt_pf_if = _GEN_76[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_77 = {{uops_15_xcpt_ae_if}, {uops_14_xcpt_ae_if}, {uops_13_xcpt_ae_if}, {uops_12_xcpt_ae_if}, {uops_11_xcpt_ae_if}, {uops_10_xcpt_ae_if}, {uops_9_xcpt_ae_if}, {uops_8_xcpt_ae_if}, {uops_7_xcpt_ae_if}, {uops_6_xcpt_ae_if}, {uops_5_xcpt_ae_if}, {uops_4_xcpt_ae_if}, {uops_3_xcpt_ae_if}, {uops_2_xcpt_ae_if}, {uops_1_xcpt_ae_if}, {uops_0_xcpt_ae_if}}; // @[util.scala:466:20, :508:19]
assign out_uop_xcpt_ae_if = _GEN_77[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_78 = {{uops_15_xcpt_ma_if}, {uops_14_xcpt_ma_if}, {uops_13_xcpt_ma_if}, {uops_12_xcpt_ma_if}, {uops_11_xcpt_ma_if}, {uops_10_xcpt_ma_if}, {uops_9_xcpt_ma_if}, {uops_8_xcpt_ma_if}, {uops_7_xcpt_ma_if}, {uops_6_xcpt_ma_if}, {uops_5_xcpt_ma_if}, {uops_4_xcpt_ma_if}, {uops_3_xcpt_ma_if}, {uops_2_xcpt_ma_if}, {uops_1_xcpt_ma_if}, {uops_0_xcpt_ma_if}}; // @[util.scala:466:20, :508:19]
assign out_uop_xcpt_ma_if = _GEN_78[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_79 = {{uops_15_bp_debug_if}, {uops_14_bp_debug_if}, {uops_13_bp_debug_if}, {uops_12_bp_debug_if}, {uops_11_bp_debug_if}, {uops_10_bp_debug_if}, {uops_9_bp_debug_if}, {uops_8_bp_debug_if}, {uops_7_bp_debug_if}, {uops_6_bp_debug_if}, {uops_5_bp_debug_if}, {uops_4_bp_debug_if}, {uops_3_bp_debug_if}, {uops_2_bp_debug_if}, {uops_1_bp_debug_if}, {uops_0_bp_debug_if}}; // @[util.scala:466:20, :508:19]
assign out_uop_bp_debug_if = _GEN_79[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0] _GEN_80 = {{uops_15_bp_xcpt_if}, {uops_14_bp_xcpt_if}, {uops_13_bp_xcpt_if}, {uops_12_bp_xcpt_if}, {uops_11_bp_xcpt_if}, {uops_10_bp_xcpt_if}, {uops_9_bp_xcpt_if}, {uops_8_bp_xcpt_if}, {uops_7_bp_xcpt_if}, {uops_6_bp_xcpt_if}, {uops_5_bp_xcpt_if}, {uops_4_bp_xcpt_if}, {uops_3_bp_xcpt_if}, {uops_2_bp_xcpt_if}, {uops_1_bp_xcpt_if}, {uops_0_bp_xcpt_if}}; // @[util.scala:466:20, :508:19]
assign out_uop_bp_xcpt_if = _GEN_80[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_81 = {{uops_15_debug_fsrc}, {uops_14_debug_fsrc}, {uops_13_debug_fsrc}, {uops_12_debug_fsrc}, {uops_11_debug_fsrc}, {uops_10_debug_fsrc}, {uops_9_debug_fsrc}, {uops_8_debug_fsrc}, {uops_7_debug_fsrc}, {uops_6_debug_fsrc}, {uops_5_debug_fsrc}, {uops_4_debug_fsrc}, {uops_3_debug_fsrc}, {uops_2_debug_fsrc}, {uops_1_debug_fsrc}, {uops_0_debug_fsrc}}; // @[util.scala:466:20, :508:19]
assign out_uop_debug_fsrc = _GEN_81[deq_ptr_value]; // @[Counter.scala:61:40]
wire [15:0][1:0] _GEN_82 = {{uops_15_debug_tsrc}, {uops_14_debug_tsrc}, {uops_13_debug_tsrc}, {uops_12_debug_tsrc}, {uops_11_debug_tsrc}, {uops_10_debug_tsrc}, {uops_9_debug_tsrc}, {uops_8_debug_tsrc}, {uops_7_debug_tsrc}, {uops_6_debug_tsrc}, {uops_5_debug_tsrc}, {uops_4_debug_tsrc}, {uops_3_debug_tsrc}, {uops_2_debug_tsrc}, {uops_1_debug_tsrc}, {uops_0_debug_tsrc}}; // @[util.scala:466:20, :508:19]
assign out_uop_debug_tsrc = _GEN_82[deq_ptr_value]; // @[Counter.scala:61:40]
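  // The head entry is valid for dequeue when the queue is non-empty and the
  // selected slot's valid bit is set. io_count is the enqueue/dequeue pointer
  // difference, truncated to the 4-bit count port; the bit concatenated above
  // it (_io_count_T, likely the full indicator) is dropped by the truncation.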
wire _io_deq_valid_T = ~io_empty_0; // @[util.scala:448:7, :476:69, :509:30]
wire _io_deq_valid_T_1 = _io_deq_valid_T & _GEN_1; // @[util.scala:476:42, :509:{30,40}]
wire _io_deq_valid_T_5 = _io_deq_valid_T_1; // @[util.scala:509:{40,65}]
assign _io_deq_valid_T_8 = _io_deq_valid_T_5; // @[util.scala:509:{65,108}]
assign io_deq_valid_0 = _io_deq_valid_T_8; // @[util.scala:448:7, :509:108]
assign io_deq_bits_uop_br_mask_0 = _io_deq_bits_uop_br_mask_T_1; // @[util.scala:85:25, :448:7]
wire [4:0] _ptr_diff_T = _GEN_2 - _GEN_3; // @[Counter.scala:77:24]
wire [3:0] ptr_diff = _ptr_diff_T[3:0]; // @[util.scala:524:40]
wire [4:0] _io_count_T_1 = {_io_count_T, ptr_diff}; // @[util.scala:524:40, :526:{20,32}]
assign io_count = _io_count_T_1[3:0]; // @[util.scala:448:7, :526:{14,20}]
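  // One-hot write enables: _GEN_83 through _GEN_113 decode enq_ptr_value and
  // gate it with do_enq, so an enqueue updates exactly one of the 16 uop/valid
  // entries in the register block below.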
wire _GEN_83 = enq_ptr_value == 4'h0; // @[Counter.scala:61:40]
wire _GEN_84 = do_enq & _GEN_83; // @[util.scala:475:24, :481:16, :487:17, :489:33]
wire _GEN_85 = enq_ptr_value == 4'h1; // @[Counter.scala:61:40]
wire _GEN_86 = do_enq & _GEN_85; // @[util.scala:475:24, :481:16, :487:17, :489:33]
wire _GEN_87 = enq_ptr_value == 4'h2; // @[Counter.scala:61:40]
wire _GEN_88 = do_enq & _GEN_87; // @[util.scala:475:24, :481:16, :487:17, :489:33]
wire _GEN_89 = enq_ptr_value == 4'h3; // @[Counter.scala:61:40]
wire _GEN_90 = do_enq & _GEN_89; // @[util.scala:475:24, :481:16, :487:17, :489:33]
wire _GEN_91 = enq_ptr_value == 4'h4; // @[Counter.scala:61:40]
wire _GEN_92 = do_enq & _GEN_91; // @[util.scala:475:24, :481:16, :487:17, :489:33]
wire _GEN_93 = enq_ptr_value == 4'h5; // @[Counter.scala:61:40]
wire _GEN_94 = do_enq & _GEN_93; // @[util.scala:475:24, :481:16, :487:17, :489:33]
wire _GEN_95 = enq_ptr_value == 4'h6; // @[Counter.scala:61:40]
wire _GEN_96 = do_enq & _GEN_95; // @[util.scala:475:24, :481:16, :487:17, :489:33]
wire _GEN_97 = enq_ptr_value == 4'h7; // @[Counter.scala:61:40]
wire _GEN_98 = do_enq & _GEN_97; // @[util.scala:475:24, :481:16, :487:17, :489:33]
wire _GEN_99 = enq_ptr_value == 4'h8; // @[Counter.scala:61:40]
wire _GEN_100 = do_enq & _GEN_99; // @[util.scala:475:24, :481:16, :487:17, :489:33]
wire _GEN_101 = enq_ptr_value == 4'h9; // @[Counter.scala:61:40]
wire _GEN_102 = do_enq & _GEN_101; // @[util.scala:475:24, :481:16, :487:17, :489:33]
wire _GEN_103 = enq_ptr_value == 4'hA; // @[Counter.scala:61:40]
wire _GEN_104 = do_enq & _GEN_103; // @[util.scala:475:24, :481:16, :487:17, :489:33]
wire _GEN_105 = enq_ptr_value == 4'hB; // @[Counter.scala:61:40]
wire _GEN_106 = do_enq & _GEN_105; // @[util.scala:475:24, :481:16, :487:17, :489:33]
wire _GEN_107 = enq_ptr_value == 4'hC; // @[Counter.scala:61:40]
wire _GEN_108 = do_enq & _GEN_107; // @[util.scala:475:24, :481:16, :487:17, :489:33]
wire _GEN_109 = enq_ptr_value == 4'hD; // @[Counter.scala:61:40]
wire _GEN_110 = do_enq & _GEN_109; // @[util.scala:475:24, :481:16, :487:17, :489:33]
wire _GEN_111 = enq_ptr_value == 4'hE; // @[Counter.scala:61:40]
wire _GEN_112 = do_enq & _GEN_111; // @[util.scala:475:24, :481:16, :487:17, :489:33]
wire _GEN_113 = do_enq & (&enq_ptr_value); // @[Counter.scala:61:40]
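  // Sequential state: reset clears all valid bits, both pointers, and
  // maybe_full. Otherwise each valid bit is cleared when its slot is dequeued
  // and set when it is enqueued (else it keeps its possibly-killed value), the
  // pointers advance when enqueue/dequeue fire, and maybe_full records which of
  // the two fired last so that full and empty can be told apart when the
  // pointers match.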
always @(posedge clock) begin // @[util.scala:448:7]
if (reset) begin // @[util.scala:448:7]
valids_0 <= 1'h0; // @[util.scala:465:24]
valids_1 <= 1'h0; // @[util.scala:465:24]
valids_2 <= 1'h0; // @[util.scala:465:24]
valids_3 <= 1'h0; // @[util.scala:465:24]
valids_4 <= 1'h0; // @[util.scala:465:24]
valids_5 <= 1'h0; // @[util.scala:465:24]
valids_6 <= 1'h0; // @[util.scala:465:24]
valids_7 <= 1'h0; // @[util.scala:465:24]
valids_8 <= 1'h0; // @[util.scala:465:24]
valids_9 <= 1'h0; // @[util.scala:465:24]
valids_10 <= 1'h0; // @[util.scala:465:24]
valids_11 <= 1'h0; // @[util.scala:465:24]
valids_12 <= 1'h0; // @[util.scala:465:24]
valids_13 <= 1'h0; // @[util.scala:465:24]
valids_14 <= 1'h0; // @[util.scala:465:24]
valids_15 <= 1'h0; // @[util.scala:465:24]
enq_ptr_value <= 4'h0; // @[Counter.scala:61:40]
deq_ptr_value <= 4'h0; // @[Counter.scala:61:40]
maybe_full <= 1'h0; // @[util.scala:470:27]
end
else begin // @[util.scala:448:7]
valids_0 <= ~(do_deq & deq_ptr_value == 4'h0) & (_GEN_84 | _valids_0_T_6); // @[Counter.scala:61:40]
valids_1 <= ~(do_deq & deq_ptr_value == 4'h1) & (_GEN_86 | _valids_1_T_6); // @[Counter.scala:61:40]
valids_2 <= ~(do_deq & deq_ptr_value == 4'h2) & (_GEN_88 | _valids_2_T_6); // @[Counter.scala:61:40]
valids_3 <= ~(do_deq & deq_ptr_value == 4'h3) & (_GEN_90 | _valids_3_T_6); // @[Counter.scala:61:40]
valids_4 <= ~(do_deq & deq_ptr_value == 4'h4) & (_GEN_92 | _valids_4_T_6); // @[Counter.scala:61:40]
valids_5 <= ~(do_deq & deq_ptr_value == 4'h5) & (_GEN_94 | _valids_5_T_6); // @[Counter.scala:61:40]
valids_6 <= ~(do_deq & deq_ptr_value == 4'h6) & (_GEN_96 | _valids_6_T_6); // @[Counter.scala:61:40]
valids_7 <= ~(do_deq & deq_ptr_value == 4'h7) & (_GEN_98 | _valids_7_T_6); // @[Counter.scala:61:40]
valids_8 <= ~(do_deq & deq_ptr_value == 4'h8) & (_GEN_100 | _valids_8_T_6); // @[Counter.scala:61:40]
valids_9 <= ~(do_deq & deq_ptr_value == 4'h9) & (_GEN_102 | _valids_9_T_6); // @[Counter.scala:61:40]
valids_10 <= ~(do_deq & deq_ptr_value == 4'hA) & (_GEN_104 | _valids_10_T_6); // @[Counter.scala:61:40]
valids_11 <= ~(do_deq & deq_ptr_value == 4'hB) & (_GEN_106 | _valids_11_T_6); // @[Counter.scala:61:40]
valids_12 <= ~(do_deq & deq_ptr_value == 4'hC) & (_GEN_108 | _valids_12_T_6); // @[Counter.scala:61:40]
valids_13 <= ~(do_deq & deq_ptr_value == 4'hD) & (_GEN_110 | _valids_13_T_6); // @[Counter.scala:61:40]
valids_14 <= ~(do_deq & deq_ptr_value == 4'hE) & (_GEN_112 | _valids_14_T_6); // @[Counter.scala:61:40]
valids_15 <= ~(do_deq & (&deq_ptr_value)) & (_GEN_113 | _valids_15_T_6); // @[Counter.scala:61:40]
if (do_enq) // @[util.scala:475:24]
enq_ptr_value <= _value_T_1; // @[Counter.scala:61:40, :77:24]
if (do_deq) // @[util.scala:476:24]
deq_ptr_value <= _value_T_3; // @[Counter.scala:61:40, :77:24]
if (~(do_enq == do_deq)) // @[util.scala:470:27, :475:24, :476:24, :500:{16,28}, :501:16]
maybe_full <= do_enq; // @[util.scala:470:27, :475:24]
end
if (_GEN_84) begin // @[util.scala:481:16, :487:17, :489:33]
uops_0_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20]
uops_0_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20]
uops_0_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20]
uops_0_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20]
uops_0_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20]
uops_0_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20]
uops_0_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20]
uops_0_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20]
uops_0_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20]
uops_0_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20]
uops_0_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20]
uops_0_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20]
uops_0_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20]
uops_0_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20]
uops_0_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20]
uops_0_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20]
uops_0_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20]
uops_0_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20]
uops_0_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20]
uops_0_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20]
uops_0_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20]
uops_0_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20]
uops_0_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20]
uops_0_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20]
uops_0_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20]
uops_0_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20]
uops_0_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20]
uops_0_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20]
uops_0_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20]
uops_0_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20]
uops_0_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20]
uops_0_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20]
uops_0_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20]
uops_0_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20]
uops_0_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20]
uops_0_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20]
uops_0_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20]
uops_0_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20]
uops_0_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20]
uops_0_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20]
uops_0_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20]
uops_0_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20]
uops_0_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20]
uops_0_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20]
uops_0_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20]
uops_0_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20]
uops_0_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20]
uops_0_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20]
uops_0_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20]
uops_0_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20]
uops_0_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20]
uops_0_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20]
uops_0_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20]
uops_0_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20]
uops_0_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20]
uops_0_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20]
uops_0_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20]
uops_0_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20]
uops_0_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20]
uops_0_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20]
uops_0_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20]
uops_0_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20]
uops_0_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20]
uops_0_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20]
uops_0_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20]
uops_0_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20]
uops_0_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20]
uops_0_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20]
uops_0_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20]
uops_0_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20]
uops_0_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20]
uops_0_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20]
uops_0_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20]
uops_0_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20]
uops_0_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20]
uops_0_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20]
uops_0_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20]
uops_0_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20]
end
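    // Branch-mask handling (entry 0 shown; the same pattern repeats for every
    // slot): on enqueue to this slot the incoming uop's br_mask is stored via
    // _uops_br_mask_T_1, while a resident valid entry takes the updated mask
    // _uops_0_br_mask_T_1; both terms are computed earlier in this module,
    // presumably to clear bits for branches that have resolved.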
if (do_enq & _GEN_83) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33]
uops_0_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20]
else if (valids_0) // @[util.scala:465:24]
uops_0_br_mask <= _uops_0_br_mask_T_1; // @[util.scala:89:21, :466:20]
if (_GEN_86) begin // @[util.scala:481:16, :487:17, :489:33]
uops_1_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20]
uops_1_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20]
uops_1_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20]
uops_1_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20]
uops_1_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20]
uops_1_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20]
uops_1_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20]
uops_1_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20]
uops_1_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20]
uops_1_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20]
uops_1_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20]
uops_1_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20]
uops_1_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20]
uops_1_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20]
uops_1_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20]
uops_1_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20]
uops_1_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20]
uops_1_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20]
uops_1_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20]
uops_1_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20]
uops_1_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20]
uops_1_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20]
uops_1_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20]
uops_1_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20]
uops_1_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20]
uops_1_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20]
uops_1_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20]
uops_1_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20]
uops_1_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20]
uops_1_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20]
uops_1_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20]
uops_1_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20]
uops_1_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20]
uops_1_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20]
uops_1_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20]
uops_1_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20]
uops_1_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20]
uops_1_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20]
uops_1_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20]
uops_1_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20]
uops_1_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20]
uops_1_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20]
uops_1_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20]
uops_1_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20]
uops_1_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20]
uops_1_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20]
uops_1_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20]
uops_1_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20]
uops_1_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20]
uops_1_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20]
uops_1_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20]
uops_1_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20]
uops_1_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20]
uops_1_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20]
uops_1_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20]
uops_1_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20]
uops_1_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20]
uops_1_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20]
uops_1_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20]
uops_1_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20]
uops_1_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20]
uops_1_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20]
uops_1_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20]
uops_1_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20]
uops_1_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20]
uops_1_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20]
uops_1_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20]
uops_1_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20]
uops_1_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20]
uops_1_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20]
uops_1_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20]
uops_1_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20]
uops_1_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20]
uops_1_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20]
uops_1_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20]
uops_1_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20]
uops_1_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20]
uops_1_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20]
end
if (do_enq & _GEN_85) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33]
uops_1_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20]
else if (valids_1) // @[util.scala:465:24]
uops_1_br_mask <= _uops_1_br_mask_T_1; // @[util.scala:89:21, :466:20]
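      // Queue entry 2: enqueue write of the incoming micro-op fields.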
if (_GEN_88) begin // @[util.scala:481:16, :487:17, :489:33]
uops_2_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20]
uops_2_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20]
uops_2_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20]
uops_2_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20]
uops_2_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20]
uops_2_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20]
uops_2_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20]
uops_2_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20]
uops_2_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20]
uops_2_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20]
uops_2_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20]
uops_2_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20]
uops_2_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20]
uops_2_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20]
uops_2_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20]
uops_2_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20]
uops_2_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20]
uops_2_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20]
uops_2_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20]
uops_2_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20]
uops_2_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20]
uops_2_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20]
uops_2_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20]
uops_2_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20]
uops_2_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20]
uops_2_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20]
uops_2_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20]
uops_2_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20]
uops_2_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20]
uops_2_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20]
uops_2_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20]
uops_2_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20]
uops_2_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20]
uops_2_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20]
uops_2_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20]
uops_2_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20]
uops_2_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20]
uops_2_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20]
uops_2_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20]
uops_2_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20]
uops_2_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20]
uops_2_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20]
uops_2_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20]
uops_2_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20]
uops_2_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20]
uops_2_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20]
uops_2_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20]
uops_2_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20]
uops_2_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20]
uops_2_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20]
uops_2_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20]
uops_2_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20]
uops_2_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20]
uops_2_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20]
uops_2_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20]
uops_2_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20]
uops_2_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20]
uops_2_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20]
uops_2_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20]
uops_2_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20]
uops_2_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20]
uops_2_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20]
uops_2_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20]
uops_2_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20]
uops_2_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20]
uops_2_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20]
uops_2_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20]
uops_2_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20]
uops_2_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20]
uops_2_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20]
uops_2_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20]
uops_2_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20]
uops_2_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20]
uops_2_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20]
uops_2_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20]
uops_2_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20]
uops_2_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20]
uops_2_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20]
end
if (do_enq & _GEN_87) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33]
uops_2_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20]
else if (valids_2) // @[util.scala:465:24]
uops_2_br_mask <= _uops_2_br_mask_T_1; // @[util.scala:89:21, :466:20]
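      // Queue entry 3: enqueue write of the incoming micro-op fields.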
if (_GEN_90) begin // @[util.scala:481:16, :487:17, :489:33]
uops_3_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20]
uops_3_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20]
uops_3_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20]
uops_3_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20]
uops_3_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20]
uops_3_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20]
uops_3_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20]
uops_3_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20]
uops_3_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20]
uops_3_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20]
uops_3_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20]
uops_3_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20]
uops_3_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20]
uops_3_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20]
uops_3_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20]
uops_3_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20]
uops_3_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20]
uops_3_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20]
uops_3_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20]
uops_3_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20]
uops_3_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20]
uops_3_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20]
uops_3_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20]
uops_3_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20]
uops_3_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20]
uops_3_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20]
uops_3_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20]
uops_3_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20]
uops_3_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20]
uops_3_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20]
uops_3_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20]
uops_3_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20]
uops_3_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20]
uops_3_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20]
uops_3_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20]
uops_3_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20]
uops_3_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20]
uops_3_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20]
uops_3_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20]
uops_3_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20]
uops_3_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20]
uops_3_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20]
uops_3_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20]
uops_3_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20]
uops_3_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20]
uops_3_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20]
uops_3_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20]
uops_3_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20]
uops_3_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20]
uops_3_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20]
uops_3_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20]
uops_3_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20]
uops_3_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20]
uops_3_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20]
uops_3_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20]
uops_3_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20]
uops_3_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20]
uops_3_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20]
uops_3_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20]
uops_3_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20]
uops_3_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20]
uops_3_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20]
uops_3_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20]
uops_3_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20]
uops_3_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20]
uops_3_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20]
uops_3_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20]
uops_3_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20]
uops_3_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20]
uops_3_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20]
uops_3_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20]
uops_3_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20]
uops_3_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20]
uops_3_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20]
uops_3_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20]
uops_3_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20]
uops_3_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20]
uops_3_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20]
end
if (do_enq & _GEN_89) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33]
uops_3_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20]
else if (valids_3) // @[util.scala:465:24]
uops_3_br_mask <= _uops_3_br_mask_T_1; // @[util.scala:89:21, :466:20]
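      // Queue entry 4: enqueue write of the incoming micro-op fields.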
if (_GEN_92) begin // @[util.scala:481:16, :487:17, :489:33]
uops_4_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20]
uops_4_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20]
uops_4_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20]
uops_4_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20]
uops_4_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20]
uops_4_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20]
uops_4_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20]
uops_4_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20]
uops_4_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20]
uops_4_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20]
uops_4_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20]
uops_4_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20]
uops_4_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20]
uops_4_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20]
uops_4_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20]
uops_4_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20]
uops_4_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20]
uops_4_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20]
uops_4_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20]
uops_4_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20]
uops_4_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20]
uops_4_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20]
uops_4_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20]
uops_4_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20]
uops_4_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20]
uops_4_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20]
uops_4_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20]
uops_4_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20]
uops_4_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20]
uops_4_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20]
uops_4_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20]
uops_4_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20]
uops_4_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20]
uops_4_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20]
uops_4_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20]
uops_4_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20]
uops_4_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20]
uops_4_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20]
uops_4_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20]
uops_4_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20]
uops_4_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20]
uops_4_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20]
uops_4_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20]
uops_4_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20]
uops_4_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20]
uops_4_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20]
uops_4_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20]
uops_4_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20]
uops_4_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20]
uops_4_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20]
uops_4_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20]
uops_4_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20]
uops_4_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20]
uops_4_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20]
uops_4_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20]
uops_4_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20]
uops_4_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20]
uops_4_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20]
uops_4_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20]
uops_4_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20]
uops_4_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20]
uops_4_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20]
uops_4_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20]
uops_4_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20]
uops_4_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20]
uops_4_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20]
uops_4_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20]
uops_4_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20]
uops_4_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20]
uops_4_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20]
uops_4_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20]
uops_4_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20]
uops_4_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20]
uops_4_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20]
uops_4_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20]
uops_4_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20]
uops_4_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20]
uops_4_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20]
end
if (do_enq & _GEN_91) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33]
uops_4_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20]
else if (valids_4) // @[util.scala:465:24]
uops_4_br_mask <= _uops_4_br_mask_T_1; // @[util.scala:89:21, :466:20]
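      // Queue entry 5: enqueue write of the incoming micro-op fields.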
if (_GEN_94) begin // @[util.scala:481:16, :487:17, :489:33]
uops_5_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20]
uops_5_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20]
uops_5_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20]
uops_5_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20]
uops_5_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20]
uops_5_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20]
uops_5_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20]
uops_5_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20]
uops_5_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20]
uops_5_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20]
uops_5_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20]
uops_5_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20]
uops_5_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20]
uops_5_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20]
uops_5_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20]
uops_5_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20]
uops_5_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20]
uops_5_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20]
uops_5_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20]
uops_5_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20]
uops_5_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20]
uops_5_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20]
uops_5_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20]
uops_5_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20]
uops_5_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20]
uops_5_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20]
uops_5_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20]
uops_5_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20]
uops_5_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20]
uops_5_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20]
uops_5_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20]
uops_5_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20]
uops_5_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20]
uops_5_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20]
uops_5_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20]
uops_5_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20]
uops_5_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20]
uops_5_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20]
uops_5_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20]
uops_5_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20]
uops_5_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20]
uops_5_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20]
uops_5_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20]
uops_5_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20]
uops_5_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20]
uops_5_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20]
uops_5_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20]
uops_5_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20]
uops_5_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20]
uops_5_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20]
uops_5_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20]
uops_5_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20]
uops_5_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20]
uops_5_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20]
uops_5_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20]
uops_5_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20]
uops_5_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20]
uops_5_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20]
uops_5_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20]
uops_5_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20]
uops_5_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20]
uops_5_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20]
uops_5_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20]
uops_5_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20]
uops_5_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20]
uops_5_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20]
uops_5_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20]
uops_5_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20]
uops_5_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20]
uops_5_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20]
uops_5_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20]
uops_5_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20]
uops_5_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20]
uops_5_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20]
uops_5_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20]
uops_5_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20]
uops_5_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20]
uops_5_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20]
end
if (do_enq & _GEN_93) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33]
uops_5_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20]
else if (valids_5) // @[util.scala:465:24]
uops_5_br_mask <= _uops_5_br_mask_T_1; // @[util.scala:89:21, :466:20]
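      // Queue entry 6: enqueue write of the incoming micro-op fields.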
if (_GEN_96) begin // @[util.scala:481:16, :487:17, :489:33]
uops_6_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20]
uops_6_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20]
uops_6_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20]
uops_6_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20]
uops_6_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20]
uops_6_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20]
uops_6_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20]
uops_6_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20]
uops_6_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20]
uops_6_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20]
uops_6_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20]
uops_6_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20]
uops_6_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20]
uops_6_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20]
uops_6_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20]
uops_6_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20]
uops_6_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20]
uops_6_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20]
uops_6_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20]
uops_6_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20]
uops_6_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20]
uops_6_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20]
uops_6_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20]
uops_6_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20]
uops_6_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20]
uops_6_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20]
uops_6_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20]
uops_6_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20]
uops_6_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20]
uops_6_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20]
uops_6_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20]
uops_6_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20]
uops_6_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20]
uops_6_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20]
uops_6_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20]
uops_6_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20]
uops_6_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20]
uops_6_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20]
uops_6_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20]
uops_6_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20]
uops_6_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20]
uops_6_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20]
uops_6_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20]
uops_6_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20]
uops_6_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20]
uops_6_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20]
uops_6_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20]
uops_6_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20]
uops_6_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20]
uops_6_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20]
uops_6_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20]
uops_6_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20]
uops_6_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20]
uops_6_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20]
uops_6_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20]
uops_6_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20]
uops_6_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20]
uops_6_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20]
uops_6_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20]
uops_6_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20]
uops_6_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20]
uops_6_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20]
uops_6_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20]
uops_6_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20]
uops_6_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20]
uops_6_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20]
uops_6_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20]
uops_6_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20]
uops_6_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20]
uops_6_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20]
uops_6_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20]
uops_6_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20]
uops_6_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20]
uops_6_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20]
uops_6_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20]
uops_6_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20]
uops_6_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20]
uops_6_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20]
end
if (do_enq & _GEN_95) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33]
uops_6_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20]
else if (valids_6) // @[util.scala:465:24]
uops_6_br_mask <= _uops_6_br_mask_T_1; // @[util.scala:89:21, :466:20]
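      // Queue entry 7: enqueue write of the incoming micro-op fields.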
if (_GEN_98) begin // @[util.scala:481:16, :487:17, :489:33]
uops_7_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20]
uops_7_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20]
uops_7_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20]
uops_7_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20]
uops_7_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20]
uops_7_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20]
uops_7_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20]
uops_7_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20]
uops_7_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20]
uops_7_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20]
uops_7_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20]
uops_7_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20]
uops_7_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20]
uops_7_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20]
uops_7_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20]
uops_7_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20]
uops_7_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20]
uops_7_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20]
uops_7_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20]
uops_7_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20]
uops_7_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20]
uops_7_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20]
uops_7_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20]
uops_7_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20]
uops_7_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20]
uops_7_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20]
uops_7_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20]
uops_7_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20]
uops_7_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20]
uops_7_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20]
uops_7_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20]
uops_7_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20]
uops_7_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20]
uops_7_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20]
uops_7_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20]
uops_7_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20]
uops_7_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20]
uops_7_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20]
uops_7_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20]
uops_7_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20]
uops_7_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20]
uops_7_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20]
uops_7_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20]
uops_7_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20]
uops_7_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20]
uops_7_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20]
uops_7_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20]
uops_7_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20]
uops_7_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20]
uops_7_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20]
uops_7_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20]
uops_7_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20]
uops_7_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20]
uops_7_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20]
uops_7_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20]
uops_7_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20]
uops_7_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20]
uops_7_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20]
uops_7_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20]
uops_7_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20]
uops_7_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20]
uops_7_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20]
uops_7_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20]
uops_7_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20]
uops_7_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20]
uops_7_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20]
uops_7_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20]
uops_7_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20]
uops_7_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20]
uops_7_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20]
uops_7_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20]
uops_7_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20]
uops_7_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20]
uops_7_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20]
uops_7_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20]
uops_7_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20]
uops_7_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20]
uops_7_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20]
end
if (do_enq & _GEN_97) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33]
uops_7_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20]
else if (valids_7) // @[util.scala:465:24]
uops_7_br_mask <= _uops_7_br_mask_T_1; // @[util.scala:89:21, :466:20]
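      // Queue entry 8: enqueue write of the incoming micro-op fields.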
if (_GEN_100) begin // @[util.scala:481:16, :487:17, :489:33]
uops_8_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20]
uops_8_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20]
uops_8_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20]
uops_8_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20]
uops_8_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20]
uops_8_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20]
uops_8_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20]
uops_8_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20]
uops_8_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20]
uops_8_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20]
uops_8_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20]
uops_8_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20]
uops_8_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20]
uops_8_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20]
uops_8_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20]
uops_8_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20]
uops_8_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20]
uops_8_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20]
uops_8_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20]
uops_8_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20]
uops_8_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20]
uops_8_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20]
uops_8_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20]
uops_8_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20]
uops_8_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20]
uops_8_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20]
uops_8_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20]
uops_8_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20]
uops_8_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20]
uops_8_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20]
uops_8_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20]
uops_8_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20]
uops_8_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20]
uops_8_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20]
uops_8_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20]
uops_8_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20]
uops_8_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20]
uops_8_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20]
uops_8_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20]
uops_8_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20]
uops_8_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20]
uops_8_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20]
uops_8_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20]
uops_8_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20]
uops_8_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20]
uops_8_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20]
uops_8_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20]
uops_8_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20]
uops_8_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20]
uops_8_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20]
uops_8_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20]
uops_8_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20]
uops_8_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20]
uops_8_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20]
uops_8_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20]
uops_8_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20]
uops_8_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20]
uops_8_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20]
uops_8_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20]
uops_8_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20]
uops_8_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20]
uops_8_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20]
uops_8_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20]
uops_8_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20]
uops_8_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20]
uops_8_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20]
uops_8_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20]
uops_8_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20]
uops_8_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20]
uops_8_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20]
uops_8_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20]
uops_8_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20]
uops_8_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20]
uops_8_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20]
uops_8_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20]
uops_8_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20]
uops_8_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20]
uops_8_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20]
end
if (do_enq & _GEN_99) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33]
uops_8_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20]
else if (valids_8) // @[util.scala:465:24]
uops_8_br_mask <= _uops_8_br_mask_T_1; // @[util.scala:89:21, :466:20]
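      // Queue entry 9: enqueue write of the incoming micro-op fields.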
if (_GEN_102) begin // @[util.scala:481:16, :487:17, :489:33]
uops_9_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20]
uops_9_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20]
uops_9_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20]
uops_9_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20]
uops_9_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20]
uops_9_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20]
uops_9_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20]
uops_9_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20]
uops_9_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20]
uops_9_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20]
uops_9_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20]
uops_9_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20]
uops_9_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20]
uops_9_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20]
uops_9_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20]
uops_9_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20]
uops_9_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20]
uops_9_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20]
uops_9_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20]
uops_9_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20]
uops_9_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20]
uops_9_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20]
uops_9_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20]
uops_9_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20]
uops_9_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20]
uops_9_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20]
uops_9_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20]
uops_9_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20]
uops_9_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20]
uops_9_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20]
uops_9_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20]
uops_9_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20]
uops_9_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20]
uops_9_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20]
uops_9_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20]
uops_9_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20]
uops_9_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20]
uops_9_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20]
uops_9_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20]
uops_9_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20]
uops_9_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20]
uops_9_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20]
uops_9_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20]
uops_9_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20]
uops_9_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20]
uops_9_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20]
uops_9_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20]
uops_9_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20]
uops_9_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20]
uops_9_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20]
uops_9_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20]
uops_9_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20]
uops_9_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20]
uops_9_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20]
uops_9_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20]
uops_9_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20]
uops_9_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20]
uops_9_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20]
uops_9_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20]
uops_9_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20]
uops_9_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20]
uops_9_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20]
uops_9_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20]
uops_9_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20]
uops_9_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20]
uops_9_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20]
uops_9_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20]
uops_9_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20]
uops_9_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20]
uops_9_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20]
uops_9_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20]
uops_9_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20]
uops_9_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20]
uops_9_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20]
uops_9_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20]
uops_9_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20]
uops_9_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20]
uops_9_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20]
end
if (do_enq & _GEN_101) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33]
uops_9_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20]
else if (valids_9) // @[util.scala:465:24]
uops_9_br_mask <= _uops_9_br_mask_T_1; // @[util.scala:89:21, :466:20]
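      // Queue entry 10: enqueue write of the incoming micro-op fields.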
if (_GEN_104) begin // @[util.scala:481:16, :487:17, :489:33]
uops_10_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20]
uops_10_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20]
uops_10_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20]
uops_10_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20]
uops_10_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20]
uops_10_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20]
uops_10_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20]
uops_10_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20]
uops_10_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20]
uops_10_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20]
uops_10_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20]
uops_10_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20]
uops_10_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20]
uops_10_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20]
uops_10_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20]
uops_10_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20]
uops_10_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20]
uops_10_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20]
uops_10_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20]
uops_10_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20]
uops_10_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20]
uops_10_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20]
uops_10_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20]
uops_10_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20]
uops_10_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20]
uops_10_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20]
uops_10_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20]
uops_10_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20]
uops_10_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20]
uops_10_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20]
uops_10_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20]
uops_10_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20]
uops_10_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20]
uops_10_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20]
uops_10_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20]
uops_10_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20]
uops_10_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20]
uops_10_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20]
uops_10_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20]
uops_10_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20]
uops_10_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20]
uops_10_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20]
uops_10_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20]
uops_10_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20]
uops_10_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20]
uops_10_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20]
uops_10_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20]
uops_10_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20]
uops_10_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20]
uops_10_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20]
uops_10_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20]
uops_10_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20]
uops_10_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20]
uops_10_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20]
uops_10_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20]
uops_10_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20]
uops_10_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20]
uops_10_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20]
uops_10_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20]
uops_10_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20]
uops_10_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20]
uops_10_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20]
uops_10_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20]
uops_10_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20]
uops_10_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20]
uops_10_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20]
uops_10_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20]
uops_10_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20]
uops_10_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20]
uops_10_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20]
uops_10_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20]
uops_10_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20]
uops_10_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20]
uops_10_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20]
uops_10_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20]
uops_10_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20]
uops_10_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20]
uops_10_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20]
end
if (do_enq & _GEN_103) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33]
uops_10_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20]
else if (valids_10) // @[util.scala:465:24]
uops_10_br_mask <= _uops_10_br_mask_T_1; // @[util.scala:89:21, :466:20]
if (_GEN_106) begin // @[util.scala:481:16, :487:17, :489:33]
uops_11_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20]
uops_11_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20]
uops_11_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20]
uops_11_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20]
uops_11_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20]
uops_11_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20]
uops_11_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20]
uops_11_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20]
uops_11_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20]
uops_11_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20]
uops_11_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20]
uops_11_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20]
uops_11_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20]
uops_11_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20]
uops_11_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20]
uops_11_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20]
uops_11_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20]
uops_11_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20]
uops_11_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20]
uops_11_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20]
uops_11_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20]
uops_11_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20]
uops_11_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20]
uops_11_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20]
uops_11_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20]
uops_11_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20]
uops_11_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20]
uops_11_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20]
uops_11_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20]
uops_11_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20]
uops_11_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20]
uops_11_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20]
uops_11_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20]
uops_11_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20]
uops_11_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20]
uops_11_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20]
uops_11_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20]
uops_11_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20]
uops_11_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20]
uops_11_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20]
uops_11_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20]
uops_11_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20]
uops_11_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20]
uops_11_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20]
uops_11_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20]
uops_11_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20]
uops_11_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20]
uops_11_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20]
uops_11_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20]
uops_11_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20]
uops_11_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20]
uops_11_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20]
uops_11_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20]
uops_11_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20]
uops_11_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20]
uops_11_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20]
uops_11_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20]
uops_11_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20]
uops_11_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20]
uops_11_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20]
uops_11_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20]
uops_11_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20]
uops_11_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20]
uops_11_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20]
uops_11_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20]
uops_11_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20]
uops_11_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20]
uops_11_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20]
uops_11_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20]
uops_11_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20]
uops_11_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20]
uops_11_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20]
uops_11_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20]
uops_11_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20]
uops_11_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20]
uops_11_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20]
uops_11_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20]
uops_11_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20]
end
if (do_enq & _GEN_105) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33]
uops_11_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20]
else if (valids_11) // @[util.scala:465:24]
uops_11_br_mask <= _uops_11_br_mask_T_1; // @[util.scala:89:21, :466:20]
if (_GEN_108) begin // @[util.scala:481:16, :487:17, :489:33]
uops_12_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20]
uops_12_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20]
uops_12_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20]
uops_12_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20]
uops_12_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20]
uops_12_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20]
uops_12_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20]
uops_12_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20]
uops_12_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20]
uops_12_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20]
uops_12_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20]
uops_12_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20]
uops_12_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20]
uops_12_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20]
uops_12_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20]
uops_12_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20]
uops_12_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20]
uops_12_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20]
uops_12_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20]
uops_12_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20]
uops_12_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20]
uops_12_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20]
uops_12_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20]
uops_12_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20]
uops_12_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20]
uops_12_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20]
uops_12_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20]
uops_12_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20]
uops_12_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20]
uops_12_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20]
uops_12_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20]
uops_12_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20]
uops_12_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20]
uops_12_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20]
uops_12_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20]
uops_12_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20]
uops_12_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20]
uops_12_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20]
uops_12_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20]
uops_12_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20]
uops_12_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20]
uops_12_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20]
uops_12_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20]
uops_12_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20]
uops_12_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20]
uops_12_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20]
uops_12_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20]
uops_12_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20]
uops_12_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20]
uops_12_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20]
uops_12_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20]
uops_12_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20]
uops_12_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20]
uops_12_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20]
uops_12_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20]
uops_12_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20]
uops_12_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20]
uops_12_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20]
uops_12_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20]
uops_12_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20]
uops_12_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20]
uops_12_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20]
uops_12_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20]
uops_12_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20]
uops_12_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20]
uops_12_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20]
uops_12_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20]
uops_12_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20]
uops_12_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20]
uops_12_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20]
uops_12_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20]
uops_12_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20]
uops_12_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20]
uops_12_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20]
uops_12_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20]
uops_12_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20]
uops_12_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20]
uops_12_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20]
end
if (do_enq & _GEN_107) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33]
uops_12_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20]
else if (valids_12) // @[util.scala:465:24]
uops_12_br_mask <= _uops_12_br_mask_T_1; // @[util.scala:89:21, :466:20]
if (_GEN_110) begin // @[util.scala:481:16, :487:17, :489:33]
uops_13_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20]
uops_13_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20]
uops_13_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20]
uops_13_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20]
uops_13_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20]
uops_13_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20]
uops_13_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20]
uops_13_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20]
uops_13_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20]
uops_13_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20]
uops_13_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20]
uops_13_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20]
uops_13_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20]
uops_13_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20]
uops_13_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20]
uops_13_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20]
uops_13_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20]
uops_13_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20]
uops_13_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20]
uops_13_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20]
uops_13_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20]
uops_13_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20]
uops_13_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20]
uops_13_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20]
uops_13_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20]
uops_13_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20]
uops_13_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20]
uops_13_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20]
uops_13_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20]
uops_13_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20]
uops_13_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20]
uops_13_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20]
uops_13_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20]
uops_13_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20]
uops_13_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20]
uops_13_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20]
uops_13_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20]
uops_13_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20]
uops_13_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20]
uops_13_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20]
uops_13_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20]
uops_13_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20]
uops_13_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20]
uops_13_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20]
uops_13_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20]
uops_13_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20]
uops_13_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20]
uops_13_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20]
uops_13_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20]
uops_13_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20]
uops_13_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20]
uops_13_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20]
uops_13_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20]
uops_13_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20]
uops_13_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20]
uops_13_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20]
uops_13_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20]
uops_13_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20]
uops_13_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20]
uops_13_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20]
uops_13_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20]
uops_13_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20]
uops_13_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20]
uops_13_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20]
uops_13_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20]
uops_13_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20]
uops_13_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20]
uops_13_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20]
uops_13_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20]
uops_13_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20]
uops_13_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20]
uops_13_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20]
uops_13_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20]
uops_13_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20]
uops_13_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20]
uops_13_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20]
uops_13_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20]
uops_13_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20]
end
if (do_enq & _GEN_109) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33]
uops_13_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20]
else if (valids_13) // @[util.scala:465:24]
uops_13_br_mask <= _uops_13_br_mask_T_1; // @[util.scala:89:21, :466:20]
if (_GEN_112) begin // @[util.scala:481:16, :487:17, :489:33]
uops_14_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20]
uops_14_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20]
uops_14_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20]
uops_14_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20]
uops_14_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20]
uops_14_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20]
uops_14_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20]
uops_14_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20]
uops_14_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20]
uops_14_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20]
uops_14_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20]
uops_14_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20]
uops_14_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20]
uops_14_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20]
uops_14_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20]
uops_14_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20]
uops_14_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20]
uops_14_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20]
uops_14_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20]
uops_14_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20]
uops_14_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20]
uops_14_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20]
uops_14_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20]
uops_14_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20]
uops_14_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20]
uops_14_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20]
uops_14_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20]
uops_14_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20]
uops_14_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20]
uops_14_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20]
uops_14_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20]
uops_14_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20]
uops_14_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20]
uops_14_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20]
uops_14_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20]
uops_14_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20]
uops_14_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20]
uops_14_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20]
uops_14_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20]
uops_14_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20]
uops_14_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20]
uops_14_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20]
uops_14_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20]
uops_14_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20]
uops_14_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20]
uops_14_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20]
uops_14_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20]
uops_14_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20]
uops_14_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20]
uops_14_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20]
uops_14_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20]
uops_14_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20]
uops_14_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20]
uops_14_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20]
uops_14_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20]
uops_14_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20]
uops_14_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20]
uops_14_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20]
uops_14_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20]
uops_14_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20]
uops_14_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20]
uops_14_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20]
uops_14_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20]
uops_14_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20]
uops_14_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20]
uops_14_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20]
uops_14_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20]
uops_14_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20]
uops_14_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20]
uops_14_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20]
uops_14_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20]
uops_14_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20]
uops_14_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20]
uops_14_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20]
uops_14_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20]
uops_14_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20]
uops_14_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20]
uops_14_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20]
end
if (do_enq & _GEN_111) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33]
uops_14_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20]
else if (valids_14) // @[util.scala:465:24]
uops_14_br_mask <= _uops_14_br_mask_T_1; // @[util.scala:89:21, :466:20]
if (_GEN_113) begin // @[util.scala:481:16, :487:17, :489:33]
uops_15_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20]
uops_15_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20]
uops_15_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20]
uops_15_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20]
uops_15_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20]
uops_15_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20]
uops_15_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20]
uops_15_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20]
uops_15_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20]
uops_15_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20]
uops_15_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20]
uops_15_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20]
uops_15_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20]
uops_15_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20]
uops_15_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20]
uops_15_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20]
uops_15_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20]
uops_15_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20]
uops_15_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20]
uops_15_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20]
uops_15_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20]
uops_15_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20]
uops_15_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20]
uops_15_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20]
uops_15_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20]
uops_15_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20]
uops_15_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20]
uops_15_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20]
uops_15_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20]
uops_15_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20]
uops_15_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20]
uops_15_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20]
uops_15_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20]
uops_15_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20]
uops_15_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20]
uops_15_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20]
uops_15_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20]
uops_15_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20]
uops_15_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20]
uops_15_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20]
uops_15_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20]
uops_15_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20]
uops_15_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20]
uops_15_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20]
uops_15_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20]
uops_15_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20]
uops_15_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20]
uops_15_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20]
uops_15_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20]
uops_15_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20]
uops_15_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20]
uops_15_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20]
uops_15_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20]
uops_15_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20]
uops_15_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20]
uops_15_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20]
uops_15_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20]
uops_15_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20]
uops_15_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20]
uops_15_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20]
uops_15_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20]
uops_15_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20]
uops_15_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20]
uops_15_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20]
uops_15_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20]
uops_15_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20]
uops_15_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20]
uops_15_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20]
uops_15_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20]
uops_15_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20]
uops_15_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20]
uops_15_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20]
uops_15_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20]
uops_15_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20]
uops_15_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20]
uops_15_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20]
uops_15_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20]
uops_15_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20]
end
if (do_enq & (&enq_ptr_value)) // @[Counter.scala:61:40]
uops_15_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20]
else if (valids_15) // @[util.scala:465:24]
uops_15_br_mask <= _uops_15_br_mask_T_1; // @[util.scala:89:21, :466:20]
  end // always @(posedge)
ram_16x131 ram_ext ( // @[util.scala:464:20]
.R0_addr (deq_ptr_value), // @[Counter.scala:61:40]
.R0_en (1'h1),
.R0_clk (clock),
.R0_data (_ram_ext_R0_data),
.W0_addr (enq_ptr_value), // @[Counter.scala:61:40]
.W0_en (do_enq), // @[util.scala:475:24]
.W0_clk (clock),
.W0_data ({io_enq_bits_sdq_id_0, io_enq_bits_way_en_0, io_enq_bits_old_meta_tag_0, io_enq_bits_old_meta_coh_state_0, io_enq_bits_tag_match_0, io_enq_bits_is_hella_0, io_enq_bits_data_0, io_enq_bits_addr_0}) // @[util.scala:448:7, :464:20]
); // @[util.scala:464:20]
assign io_enq_ready = io_enq_ready_0; // @[util.scala:448:7]
assign io_deq_valid = io_deq_valid_0; // @[util.scala:448:7]
assign io_deq_bits_uop_uopc = io_deq_bits_uop_uopc_0; // @[util.scala:448:7]
assign io_deq_bits_uop_inst = io_deq_bits_uop_inst_0; // @[util.scala:448:7]
assign io_deq_bits_uop_debug_inst = io_deq_bits_uop_debug_inst_0; // @[util.scala:448:7]
assign io_deq_bits_uop_is_rvc = io_deq_bits_uop_is_rvc_0; // @[util.scala:448:7]
assign io_deq_bits_uop_debug_pc = io_deq_bits_uop_debug_pc_0; // @[util.scala:448:7]
assign io_deq_bits_uop_iq_type = io_deq_bits_uop_iq_type_0; // @[util.scala:448:7]
assign io_deq_bits_uop_fu_code = io_deq_bits_uop_fu_code_0; // @[util.scala:448:7]
assign io_deq_bits_uop_ctrl_br_type = io_deq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7]
assign io_deq_bits_uop_ctrl_op1_sel = io_deq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7]
assign io_deq_bits_uop_ctrl_op2_sel = io_deq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7]
assign io_deq_bits_uop_ctrl_imm_sel = io_deq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7]
assign io_deq_bits_uop_ctrl_op_fcn = io_deq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7]
assign io_deq_bits_uop_ctrl_fcn_dw = io_deq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7]
assign io_deq_bits_uop_ctrl_csr_cmd = io_deq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7]
assign io_deq_bits_uop_ctrl_is_load = io_deq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7]
assign io_deq_bits_uop_ctrl_is_sta = io_deq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7]
assign io_deq_bits_uop_ctrl_is_std = io_deq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7]
assign io_deq_bits_uop_iw_state = io_deq_bits_uop_iw_state_0; // @[util.scala:448:7]
assign io_deq_bits_uop_iw_p1_poisoned = io_deq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7]
assign io_deq_bits_uop_iw_p2_poisoned = io_deq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7]
assign io_deq_bits_uop_is_br = io_deq_bits_uop_is_br_0; // @[util.scala:448:7]
assign io_deq_bits_uop_is_jalr = io_deq_bits_uop_is_jalr_0; // @[util.scala:448:7]
assign io_deq_bits_uop_is_jal = io_deq_bits_uop_is_jal_0; // @[util.scala:448:7]
assign io_deq_bits_uop_is_sfb = io_deq_bits_uop_is_sfb_0; // @[util.scala:448:7]
assign io_deq_bits_uop_br_mask = io_deq_bits_uop_br_mask_0; // @[util.scala:448:7]
assign io_deq_bits_uop_br_tag = io_deq_bits_uop_br_tag_0; // @[util.scala:448:7]
assign io_deq_bits_uop_ftq_idx = io_deq_bits_uop_ftq_idx_0; // @[util.scala:448:7]
assign io_deq_bits_uop_edge_inst = io_deq_bits_uop_edge_inst_0; // @[util.scala:448:7]
assign io_deq_bits_uop_pc_lob = io_deq_bits_uop_pc_lob_0; // @[util.scala:448:7]
assign io_deq_bits_uop_taken = io_deq_bits_uop_taken_0; // @[util.scala:448:7]
assign io_deq_bits_uop_imm_packed = io_deq_bits_uop_imm_packed_0; // @[util.scala:448:7]
assign io_deq_bits_uop_csr_addr = io_deq_bits_uop_csr_addr_0; // @[util.scala:448:7]
assign io_deq_bits_uop_rob_idx = io_deq_bits_uop_rob_idx_0; // @[util.scala:448:7]
assign io_deq_bits_uop_ldq_idx = io_deq_bits_uop_ldq_idx_0; // @[util.scala:448:7]
assign io_deq_bits_uop_stq_idx = io_deq_bits_uop_stq_idx_0; // @[util.scala:448:7]
assign io_deq_bits_uop_rxq_idx = io_deq_bits_uop_rxq_idx_0; // @[util.scala:448:7]
assign io_deq_bits_uop_pdst = io_deq_bits_uop_pdst_0; // @[util.scala:448:7]
assign io_deq_bits_uop_prs1 = io_deq_bits_uop_prs1_0; // @[util.scala:448:7]
assign io_deq_bits_uop_prs2 = io_deq_bits_uop_prs2_0; // @[util.scala:448:7]
assign io_deq_bits_uop_prs3 = io_deq_bits_uop_prs3_0; // @[util.scala:448:7]
assign io_deq_bits_uop_ppred = io_deq_bits_uop_ppred_0; // @[util.scala:448:7]
assign io_deq_bits_uop_prs1_busy = io_deq_bits_uop_prs1_busy_0; // @[util.scala:448:7]
assign io_deq_bits_uop_prs2_busy = io_deq_bits_uop_prs2_busy_0; // @[util.scala:448:7]
assign io_deq_bits_uop_prs3_busy = io_deq_bits_uop_prs3_busy_0; // @[util.scala:448:7]
assign io_deq_bits_uop_ppred_busy = io_deq_bits_uop_ppred_busy_0; // @[util.scala:448:7]
assign io_deq_bits_uop_stale_pdst = io_deq_bits_uop_stale_pdst_0; // @[util.scala:448:7]
assign io_deq_bits_uop_exception = io_deq_bits_uop_exception_0; // @[util.scala:448:7]
assign io_deq_bits_uop_exc_cause = io_deq_bits_uop_exc_cause_0; // @[util.scala:448:7]
assign io_deq_bits_uop_bypassable = io_deq_bits_uop_bypassable_0; // @[util.scala:448:7]
assign io_deq_bits_uop_mem_cmd = io_deq_bits_uop_mem_cmd_0; // @[util.scala:448:7]
assign io_deq_bits_uop_mem_size = io_deq_bits_uop_mem_size_0; // @[util.scala:448:7]
assign io_deq_bits_uop_mem_signed = io_deq_bits_uop_mem_signed_0; // @[util.scala:448:7]
assign io_deq_bits_uop_is_fence = io_deq_bits_uop_is_fence_0; // @[util.scala:448:7]
assign io_deq_bits_uop_is_fencei = io_deq_bits_uop_is_fencei_0; // @[util.scala:448:7]
assign io_deq_bits_uop_is_amo = io_deq_bits_uop_is_amo_0; // @[util.scala:448:7]
assign io_deq_bits_uop_uses_ldq = io_deq_bits_uop_uses_ldq_0; // @[util.scala:448:7]
assign io_deq_bits_uop_uses_stq = io_deq_bits_uop_uses_stq_0; // @[util.scala:448:7]
assign io_deq_bits_uop_is_sys_pc2epc = io_deq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7]
assign io_deq_bits_uop_is_unique = io_deq_bits_uop_is_unique_0; // @[util.scala:448:7]
assign io_deq_bits_uop_flush_on_commit = io_deq_bits_uop_flush_on_commit_0; // @[util.scala:448:7]
assign io_deq_bits_uop_ldst_is_rs1 = io_deq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7]
assign io_deq_bits_uop_ldst = io_deq_bits_uop_ldst_0; // @[util.scala:448:7]
assign io_deq_bits_uop_lrs1 = io_deq_bits_uop_lrs1_0; // @[util.scala:448:7]
assign io_deq_bits_uop_lrs2 = io_deq_bits_uop_lrs2_0; // @[util.scala:448:7]
assign io_deq_bits_uop_lrs3 = io_deq_bits_uop_lrs3_0; // @[util.scala:448:7]
assign io_deq_bits_uop_ldst_val = io_deq_bits_uop_ldst_val_0; // @[util.scala:448:7]
assign io_deq_bits_uop_dst_rtype = io_deq_bits_uop_dst_rtype_0; // @[util.scala:448:7]
assign io_deq_bits_uop_lrs1_rtype = io_deq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7]
assign io_deq_bits_uop_lrs2_rtype = io_deq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7]
assign io_deq_bits_uop_frs3_en = io_deq_bits_uop_frs3_en_0; // @[util.scala:448:7]
assign io_deq_bits_uop_fp_val = io_deq_bits_uop_fp_val_0; // @[util.scala:448:7]
assign io_deq_bits_uop_fp_single = io_deq_bits_uop_fp_single_0; // @[util.scala:448:7]
assign io_deq_bits_uop_xcpt_pf_if = io_deq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7]
assign io_deq_bits_uop_xcpt_ae_if = io_deq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7]
assign io_deq_bits_uop_xcpt_ma_if = io_deq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7]
assign io_deq_bits_uop_bp_debug_if = io_deq_bits_uop_bp_debug_if_0; // @[util.scala:448:7]
assign io_deq_bits_uop_bp_xcpt_if = io_deq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7]
assign io_deq_bits_uop_debug_fsrc = io_deq_bits_uop_debug_fsrc_0; // @[util.scala:448:7]
assign io_deq_bits_uop_debug_tsrc = io_deq_bits_uop_debug_tsrc_0; // @[util.scala:448:7]
assign io_deq_bits_addr = io_deq_bits_addr_0; // @[util.scala:448:7]
assign io_deq_bits_data = io_deq_bits_data_0; // @[util.scala:448:7]
assign io_deq_bits_is_hella = io_deq_bits_is_hella_0; // @[util.scala:448:7]
assign io_deq_bits_tag_match = io_deq_bits_tag_match_0; // @[util.scala:448:7]
assign io_deq_bits_old_meta_coh_state = io_deq_bits_old_meta_coh_state_0; // @[util.scala:448:7]
assign io_deq_bits_old_meta_tag = io_deq_bits_old_meta_tag_0; // @[util.scala:448:7]
assign io_deq_bits_sdq_id = io_deq_bits_sdq_id_0; // @[util.scala:448:7]
assign io_empty = io_empty_0; // @[util.scala:448:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
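
// Illustrative usage sketch (not part of the upstream file): a minimal module that
// passes a raw input through a named, reset-initialized 2-deep chain built by
// ShiftRegInit. The module name and register-name prefix here are hypothetical.
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  // Elaborates two RegNext stages, each reset to false.B, named sync_1 and sync_0.
  io.q := ShiftRegInit(io.d, n = 2, init = false.B, name = Some("sync"))
}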
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
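
// Illustrative usage sketch (not part of the upstream file): route a 1-bit signal
// through a 3-deep asynchronously reset pipeline named "cdc_pipe". Elaboration relies
// on AsyncResetRegVec from this package and, per the desiredName convention above,
// produces an AsyncResetShiftReg_w1_d3_i0 wrapper. Names here are hypothetical.
class AsyncResetShiftRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  io.q := AsyncResetShiftReg(io.d, 3, 0, Some("cdc_pipe"))
}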
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
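
// Illustrative sketch (hypothetical values, not part of the upstream file): two common
// configurations of the parameters above, with the derived fields noted inline.
object AsyncQueueParamsExamples {
  // 16-deep, 3-flop synchronizers, reset-safe: bits = 4, wires = 16.
  val deepSafe = AsyncQueueParams(depth = 16, sync = 3, safe = true)
  // Narrow crossing: the read mux moves to the source side, so wires = 1,
  // at the cost of a combinational sink -> source -> sink path.
  val narrowCrossing = AsyncQueueParams(depth = 8, narrow = true)
}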
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
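// Worked example (illustrative comment, not in the upstream file): with bits = 3 the
// binary counter and the Gray code produced by `incremented ^ (incremented >> 1)` are
//   binary: 000 001 010 011 100 101 ...
//   gray:   000 001 011 010 110 111 ...
// Exactly one bit flips per increment, which is what makes these pointers safe to
// synchronize across the clock-domain crossing below.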
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because dequeue can occur on the receiving side,
// then reset is allowed to happen, but the write side cannot know that the dequeue
// occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
    // TODO: write some sort of sanity-check assertion for users
    // stating "do not reset while there is activity".
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
  // Sometimes it makes sense for the sink to have a different sync depth than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
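// A minimal usage sketch (not part of the original file), with hypothetical
// module and port names: crossing an 8-bit Decoupled stream between two clock
// domains using ToAsyncBundle on the source side and FromAsyncBundle on the sink.
class AsyncBundleCrossingExample extends RawModule {
  val enq_clock = IO(Input(Clock()))
  val enq_reset = IO(Input(Bool()))
  val deq_clock = IO(Input(Clock()))
  val deq_reset = IO(Input(Bool()))
  val enq = IO(Flipped(Decoupled(UInt(8.W))))
  val deq = IO(Decoupled(UInt(8.W)))
  // The source half is elaborated in the enqueue clock domain...
  val crossing = withClockAndReset(enq_clock, enq_reset) { ToAsyncBundle(enq) }
  // ...and the sink half is elaborated in the dequeue clock domain.
  deq <> withClockAndReset(deq_clock, deq_reset) { FromAsyncBundle(crossing) }
}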
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_61( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_71 io_out_sink_valid_1 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in_0), // @[AsyncQueue.scala:58:7]
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
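  // Example (illustrative, not in the original source):
  //   Seq(a, b, c, d).rotate(1)      == Seq(b, c, d, a)
  //   Seq(a, b, c, d).rotateRight(1) == Seq(d, a, b, c)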
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
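  // Example (illustrative, not in the original source):
  //   "FooBar Baz".underscore == "foo_bar_baz"
  //   "Foo Bar_baz".kebab     == "foo-bar-baz"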
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
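  // Example (illustrative, not in the original source):
  //   "b101".U(3.W).sextTo(6) == "b111101".U  // sign-extend to 6 bits
  //   "b101".U(3.W).padTo(6)  == "b000101".U  // zero-extend to 6 bits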
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
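  // Example (illustrative, not in the original source):
  //   UIntToOH1(3.U, 8)  == "b0000_0111".U  // thermometer code with three ones
  //   OH1ToOH("b0111".U) == "b1000".U       // back to a plain one-hot encoding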
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
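  // Example (illustrative, not in the original source), for x = "b00100".U(5.W):
  //   leftOR(x)  == "b11100".U   // ones spread from the set bit upward
  //   rightOR(x) == "b00111".U   // ones spread from the set bit downward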
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
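  // Example (illustrative, not in the original source):
  //   val guarded = OptimizationBarrier(criticalSignal)
  // wraps `criticalSignal` in a pass-through module, adding a hierarchy boundary
  // that keeps it from being folded into the surrounding logic.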
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
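  // Example (illustrative, not in the original source):
  //   groupByIntoSeq(Seq(1, 2, 3, 4, 5, 6))(_ % 2)
  //   // => Seq(1 -> Seq(1, 3, 5), 0 -> Seq(2, 4, 6))
  // Keys appear in order of first occurrence, so generated hardware is stable
  // across runs, unlike the unordered Map returned by Seq.groupBy.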
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
| module OptimizationBarrier_TLBEntryData_51( // @[package.scala:267:30]
input clock, // @[package.scala:267:30]
input reset, // @[package.scala:267:30]
input [19:0] io_x_ppn, // @[package.scala:268:18]
input io_x_u, // @[package.scala:268:18]
input io_x_g, // @[package.scala:268:18]
input io_x_ae_ptw, // @[package.scala:268:18]
input io_x_ae_final, // @[package.scala:268:18]
input io_x_ae_stage2, // @[package.scala:268:18]
input io_x_pf, // @[package.scala:268:18]
input io_x_gf, // @[package.scala:268:18]
input io_x_sw, // @[package.scala:268:18]
input io_x_sx, // @[package.scala:268:18]
input io_x_sr, // @[package.scala:268:18]
input io_x_hw, // @[package.scala:268:18]
input io_x_hx, // @[package.scala:268:18]
input io_x_hr, // @[package.scala:268:18]
input io_x_pw, // @[package.scala:268:18]
input io_x_px, // @[package.scala:268:18]
input io_x_pr, // @[package.scala:268:18]
input io_x_ppp, // @[package.scala:268:18]
input io_x_pal, // @[package.scala:268:18]
input io_x_paa, // @[package.scala:268:18]
input io_x_eff, // @[package.scala:268:18]
input io_x_c, // @[package.scala:268:18]
input io_x_fragmented_superpage, // @[package.scala:268:18]
output [19:0] io_y_ppn, // @[package.scala:268:18]
output io_y_u, // @[package.scala:268:18]
output io_y_ae_ptw, // @[package.scala:268:18]
output io_y_ae_final, // @[package.scala:268:18]
output io_y_ae_stage2, // @[package.scala:268:18]
output io_y_pf, // @[package.scala:268:18]
output io_y_gf, // @[package.scala:268:18]
output io_y_sw, // @[package.scala:268:18]
output io_y_sx, // @[package.scala:268:18]
output io_y_sr, // @[package.scala:268:18]
output io_y_hw, // @[package.scala:268:18]
output io_y_hx, // @[package.scala:268:18]
output io_y_hr, // @[package.scala:268:18]
output io_y_pw, // @[package.scala:268:18]
output io_y_px, // @[package.scala:268:18]
output io_y_pr, // @[package.scala:268:18]
output io_y_ppp, // @[package.scala:268:18]
output io_y_pal, // @[package.scala:268:18]
output io_y_paa, // @[package.scala:268:18]
output io_y_eff, // @[package.scala:268:18]
output io_y_c // @[package.scala:268:18]
);
wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30]
wire io_x_u_0 = io_x_u; // @[package.scala:267:30]
wire io_x_g_0 = io_x_g; // @[package.scala:267:30]
wire io_x_ae_ptw_0 = io_x_ae_ptw; // @[package.scala:267:30]
wire io_x_ae_final_0 = io_x_ae_final; // @[package.scala:267:30]
wire io_x_ae_stage2_0 = io_x_ae_stage2; // @[package.scala:267:30]
wire io_x_pf_0 = io_x_pf; // @[package.scala:267:30]
wire io_x_gf_0 = io_x_gf; // @[package.scala:267:30]
wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30]
wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30]
wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30]
wire io_x_hw_0 = io_x_hw; // @[package.scala:267:30]
wire io_x_hx_0 = io_x_hx; // @[package.scala:267:30]
wire io_x_hr_0 = io_x_hr; // @[package.scala:267:30]
wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30]
wire io_x_px_0 = io_x_px; // @[package.scala:267:30]
wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30]
wire io_x_ppp_0 = io_x_ppp; // @[package.scala:267:30]
wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30]
wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30]
wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30]
wire io_x_c_0 = io_x_c; // @[package.scala:267:30]
wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30]
wire [19:0] io_y_ppn_0 = io_x_ppn_0; // @[package.scala:267:30]
wire io_y_u_0 = io_x_u_0; // @[package.scala:267:30]
wire io_y_g = io_x_g_0; // @[package.scala:267:30]
wire io_y_ae_ptw_0 = io_x_ae_ptw_0; // @[package.scala:267:30]
wire io_y_ae_final_0 = io_x_ae_final_0; // @[package.scala:267:30]
wire io_y_ae_stage2_0 = io_x_ae_stage2_0; // @[package.scala:267:30]
wire io_y_pf_0 = io_x_pf_0; // @[package.scala:267:30]
wire io_y_gf_0 = io_x_gf_0; // @[package.scala:267:30]
wire io_y_sw_0 = io_x_sw_0; // @[package.scala:267:30]
wire io_y_sx_0 = io_x_sx_0; // @[package.scala:267:30]
wire io_y_sr_0 = io_x_sr_0; // @[package.scala:267:30]
wire io_y_hw_0 = io_x_hw_0; // @[package.scala:267:30]
wire io_y_hx_0 = io_x_hx_0; // @[package.scala:267:30]
wire io_y_hr_0 = io_x_hr_0; // @[package.scala:267:30]
wire io_y_pw_0 = io_x_pw_0; // @[package.scala:267:30]
wire io_y_px_0 = io_x_px_0; // @[package.scala:267:30]
wire io_y_pr_0 = io_x_pr_0; // @[package.scala:267:30]
wire io_y_ppp_0 = io_x_ppp_0; // @[package.scala:267:30]
wire io_y_pal_0 = io_x_pal_0; // @[package.scala:267:30]
wire io_y_paa_0 = io_x_paa_0; // @[package.scala:267:30]
wire io_y_eff_0 = io_x_eff_0; // @[package.scala:267:30]
wire io_y_c_0 = io_x_c_0; // @[package.scala:267:30]
wire io_y_fragmented_superpage = io_x_fragmented_superpage_0; // @[package.scala:267:30]
assign io_y_ppn = io_y_ppn_0; // @[package.scala:267:30]
assign io_y_u = io_y_u_0; // @[package.scala:267:30]
assign io_y_ae_ptw = io_y_ae_ptw_0; // @[package.scala:267:30]
assign io_y_ae_final = io_y_ae_final_0; // @[package.scala:267:30]
assign io_y_ae_stage2 = io_y_ae_stage2_0; // @[package.scala:267:30]
assign io_y_pf = io_y_pf_0; // @[package.scala:267:30]
assign io_y_gf = io_y_gf_0; // @[package.scala:267:30]
assign io_y_sw = io_y_sw_0; // @[package.scala:267:30]
assign io_y_sx = io_y_sx_0; // @[package.scala:267:30]
assign io_y_sr = io_y_sr_0; // @[package.scala:267:30]
assign io_y_hw = io_y_hw_0; // @[package.scala:267:30]
assign io_y_hx = io_y_hx_0; // @[package.scala:267:30]
assign io_y_hr = io_y_hr_0; // @[package.scala:267:30]
assign io_y_pw = io_y_pw_0; // @[package.scala:267:30]
assign io_y_px = io_y_px_0; // @[package.scala:267:30]
assign io_y_pr = io_y_pr_0; // @[package.scala:267:30]
assign io_y_ppp = io_y_ppp_0; // @[package.scala:267:30]
assign io_y_pal = io_y_pal_0; // @[package.scala:267:30]
assign io_y_paa = io_y_paa_0; // @[package.scala:267:30]
assign io_y_eff = io_y_eff_0; // @[package.scala:267:30]
assign io_y_c = io_y_c_0; // @[package.scala:267:30]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
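// A minimal usage sketch (not part of the original file), assuming the names
// below: delaying a level signal by two asynchronously reset pipeline stages.
class AsyncResetShiftRegExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(Bool())
    val out = Output(Bool())
  })
  // Two stages, reset value 0, registers suggested-named "pipe_example_*".
  io.out := AsyncResetShiftReg(io.in, depth = 2, init = 0, name = Some("pipe_example"))
}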
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
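// A minimal usage sketch (not part of the original file), assuming the names
// below: bringing one asynchronous level into the local clock domain through a
// three-deep synchronizer chain.
class AsyncLevelSyncExample extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())
    val sync_out = Output(Bool())
  })
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, 3, Some("level_sync"))
}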
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_200( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
end
  end // always @(posedge, posedge)
  assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File MulRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (ported from Verilog to
Chisel by Andrew Waterman).
Copyright 2019, 2020 The Regents of the University of California. All rights
reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulFullRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
val io = IO(new Bundle {
val a = Input(new RawFloat(expWidth, sigWidth))
val b = Input(new RawFloat(expWidth, sigWidth))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth*2 - 1))
})
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
val notSigNaN_invalidExc = (io.a.isInf && io.b.isZero) || (io.a.isZero && io.b.isInf)
val notNaN_isInfOut = io.a.isInf || io.b.isInf
val notNaN_isZeroOut = io.a.isZero || io.b.isZero
val notNaN_signOut = io.a.sign ^ io.b.sign
val common_sExpOut = io.a.sExp + io.b.sExp - (1<<expWidth).S
val common_sigOut = (io.a.sig * io.b.sig)(sigWidth*2 - 1, 0)
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
io.invalidExc := isSigNaNRawFloat(io.a) || isSigNaNRawFloat(io.b) || notSigNaN_invalidExc
io.rawOut.isInf := notNaN_isInfOut
io.rawOut.isZero := notNaN_isZeroOut
io.rawOut.sExp := common_sExpOut
io.rawOut.isNaN := io.a.isNaN || io.b.isNaN
io.rawOut.sign := notNaN_signOut
io.rawOut.sig := common_sigOut
}
class MulRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
val io = IO(new Bundle {
val a = Input(new RawFloat(expWidth, sigWidth))
val b = Input(new RawFloat(expWidth, sigWidth))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
})
val mulFullRaw = Module(new MulFullRawFN(expWidth, sigWidth))
mulFullRaw.io.a := io.a
mulFullRaw.io.b := io.b
io.invalidExc := mulFullRaw.io.invalidExc
io.rawOut := mulFullRaw.io.rawOut
io.rawOut.sig := {
val sig = mulFullRaw.io.rawOut.sig
Cat(sig >> (sigWidth - 2), sig(sigWidth - 3, 0).orR)
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulRecFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
val io = IO(new Bundle {
val a = Input(UInt((expWidth + sigWidth + 1).W))
val b = Input(UInt((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(Bool())
val out = Output(UInt((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(UInt(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulRawFN = Module(new MulRawFN(expWidth, sigWidth))
mulRawFN.io.a := rawFloatFromRecFN(expWidth, sigWidth, io.a)
mulRawFN.io.b := rawFloatFromRecFN(expWidth, sigWidth, io.b)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN =
Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
roundRawFNToRecFN.io.invalidExc := mulRawFN.io.invalidExc
roundRawFNToRecFN.io.infiniteExc := false.B
roundRawFNToRecFN.io.in := mulRawFN.io.rawOut
roundRawFNToRecFN.io.roundingMode := io.roundingMode
roundRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
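// A minimal usage sketch (not part of the original file): a single-precision
// (expWidth = 8, sigWidth = 24) recoded-format multiplier. The wrapper name and
// the literal rounding-mode/tininess settings are illustrative assumptions.
class MulRecF32Example extends RawModule {
  val io = IO(new Bundle {
    val a = Input(UInt(33.W)) // recoded single precision: expWidth + sigWidth + 1 bits
    val b = Input(UInt(33.W))
    val out = Output(UInt(33.W))
    val exceptionFlags = Output(UInt(5.W))
  })
  val mul = Module(new MulRecFN(8, 24))
  mul.io.a := io.a
  mul.io.b := io.b
  mul.io.roundingMode := 0.U      // 0 encodes round-to-nearest-even in hardfloat
  mul.io.detectTininess := true.B // assumed: detect tininess after rounding
  io.out := mul.io.out
  io.exceptionFlags := mul.io.exceptionFlags
}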
| module MulFullRawFN_3( // @[MulRecFN.scala:47:7]
input io_a_isNaN, // @[MulRecFN.scala:49:16]
input io_a_isInf, // @[MulRecFN.scala:49:16]
input io_a_isZero, // @[MulRecFN.scala:49:16]
input io_a_sign, // @[MulRecFN.scala:49:16]
input [9:0] io_a_sExp, // @[MulRecFN.scala:49:16]
input [24:0] io_a_sig, // @[MulRecFN.scala:49:16]
input io_b_isNaN, // @[MulRecFN.scala:49:16]
input io_b_isInf, // @[MulRecFN.scala:49:16]
input io_b_isZero, // @[MulRecFN.scala:49:16]
input io_b_sign, // @[MulRecFN.scala:49:16]
input [9:0] io_b_sExp, // @[MulRecFN.scala:49:16]
input [24:0] io_b_sig, // @[MulRecFN.scala:49:16]
output io_invalidExc, // @[MulRecFN.scala:49:16]
output io_rawOut_isNaN, // @[MulRecFN.scala:49:16]
output io_rawOut_isInf, // @[MulRecFN.scala:49:16]
output io_rawOut_isZero, // @[MulRecFN.scala:49:16]
output io_rawOut_sign, // @[MulRecFN.scala:49:16]
output [9:0] io_rawOut_sExp, // @[MulRecFN.scala:49:16]
output [47:0] io_rawOut_sig // @[MulRecFN.scala:49:16]
);
wire io_a_isNaN_0 = io_a_isNaN; // @[MulRecFN.scala:47:7]
wire io_a_isInf_0 = io_a_isInf; // @[MulRecFN.scala:47:7]
wire io_a_isZero_0 = io_a_isZero; // @[MulRecFN.scala:47:7]
wire io_a_sign_0 = io_a_sign; // @[MulRecFN.scala:47:7]
wire [9:0] io_a_sExp_0 = io_a_sExp; // @[MulRecFN.scala:47:7]
wire [24:0] io_a_sig_0 = io_a_sig; // @[MulRecFN.scala:47:7]
wire io_b_isNaN_0 = io_b_isNaN; // @[MulRecFN.scala:47:7]
wire io_b_isInf_0 = io_b_isInf; // @[MulRecFN.scala:47:7]
wire io_b_isZero_0 = io_b_isZero; // @[MulRecFN.scala:47:7]
wire io_b_sign_0 = io_b_sign; // @[MulRecFN.scala:47:7]
wire [9:0] io_b_sExp_0 = io_b_sExp; // @[MulRecFN.scala:47:7]
wire [24:0] io_b_sig_0 = io_b_sig; // @[MulRecFN.scala:47:7]
wire _io_invalidExc_T_7; // @[MulRecFN.scala:66:71]
wire _io_rawOut_isNaN_T; // @[MulRecFN.scala:70:35]
wire notNaN_isInfOut; // @[MulRecFN.scala:59:38]
wire notNaN_isZeroOut; // @[MulRecFN.scala:60:40]
wire notNaN_signOut; // @[MulRecFN.scala:61:36]
wire [9:0] common_sExpOut; // @[MulRecFN.scala:62:48]
wire [47:0] common_sigOut; // @[MulRecFN.scala:63:46]
wire io_rawOut_isNaN_0; // @[MulRecFN.scala:47:7]
wire io_rawOut_isInf_0; // @[MulRecFN.scala:47:7]
wire io_rawOut_isZero_0; // @[MulRecFN.scala:47:7]
wire io_rawOut_sign_0; // @[MulRecFN.scala:47:7]
wire [9:0] io_rawOut_sExp_0; // @[MulRecFN.scala:47:7]
wire [47:0] io_rawOut_sig_0; // @[MulRecFN.scala:47:7]
wire io_invalidExc_0; // @[MulRecFN.scala:47:7]
wire _notSigNaN_invalidExc_T = io_a_isInf_0 & io_b_isZero_0; // @[MulRecFN.scala:47:7, :58:44]
wire _notSigNaN_invalidExc_T_1 = io_a_isZero_0 & io_b_isInf_0; // @[MulRecFN.scala:47:7, :58:76]
wire notSigNaN_invalidExc = _notSigNaN_invalidExc_T | _notSigNaN_invalidExc_T_1; // @[MulRecFN.scala:58:{44,60,76}]
assign notNaN_isInfOut = io_a_isInf_0 | io_b_isInf_0; // @[MulRecFN.scala:47:7, :59:38]
assign io_rawOut_isInf_0 = notNaN_isInfOut; // @[MulRecFN.scala:47:7, :59:38]
assign notNaN_isZeroOut = io_a_isZero_0 | io_b_isZero_0; // @[MulRecFN.scala:47:7, :60:40]
assign io_rawOut_isZero_0 = notNaN_isZeroOut; // @[MulRecFN.scala:47:7, :60:40]
assign notNaN_signOut = io_a_sign_0 ^ io_b_sign_0; // @[MulRecFN.scala:47:7, :61:36]
assign io_rawOut_sign_0 = notNaN_signOut; // @[MulRecFN.scala:47:7, :61:36]
wire [10:0] _common_sExpOut_T = {io_a_sExp_0[9], io_a_sExp_0} + {io_b_sExp_0[9], io_b_sExp_0}; // @[MulRecFN.scala:47:7, :62:36]
wire [9:0] _common_sExpOut_T_1 = _common_sExpOut_T[9:0]; // @[MulRecFN.scala:62:36]
wire [9:0] _common_sExpOut_T_2 = _common_sExpOut_T_1; // @[MulRecFN.scala:62:36]
wire [10:0] _common_sExpOut_T_3 = {_common_sExpOut_T_2[9], _common_sExpOut_T_2} - 11'h100; // @[MulRecFN.scala:62:{36,48}]
wire [9:0] _common_sExpOut_T_4 = _common_sExpOut_T_3[9:0]; // @[MulRecFN.scala:62:48]
assign common_sExpOut = _common_sExpOut_T_4; // @[MulRecFN.scala:62:48]
assign io_rawOut_sExp_0 = common_sExpOut; // @[MulRecFN.scala:47:7, :62:48]
wire [49:0] _common_sigOut_T = {25'h0, io_a_sig_0} * {25'h0, io_b_sig_0}; // @[MulRecFN.scala:47:7, :63:35]
assign common_sigOut = _common_sigOut_T[47:0]; // @[MulRecFN.scala:63:{35,46}]
assign io_rawOut_sig_0 = common_sigOut; // @[MulRecFN.scala:47:7, :63:46]
wire _io_invalidExc_T = io_a_sig_0[22]; // @[common.scala:82:56]
wire _io_invalidExc_T_1 = ~_io_invalidExc_T; // @[common.scala:82:{49,56}]
wire _io_invalidExc_T_2 = io_a_isNaN_0 & _io_invalidExc_T_1; // @[common.scala:82:{46,49}]
wire _io_invalidExc_T_3 = io_b_sig_0[22]; // @[common.scala:82:56]
wire _io_invalidExc_T_4 = ~_io_invalidExc_T_3; // @[common.scala:82:{49,56}]
wire _io_invalidExc_T_5 = io_b_isNaN_0 & _io_invalidExc_T_4; // @[common.scala:82:{46,49}]
wire _io_invalidExc_T_6 = _io_invalidExc_T_2 | _io_invalidExc_T_5; // @[common.scala:82:46]
assign _io_invalidExc_T_7 = _io_invalidExc_T_6 | notSigNaN_invalidExc; // @[MulRecFN.scala:58:60, :66:{45,71}]
assign io_invalidExc_0 = _io_invalidExc_T_7; // @[MulRecFN.scala:47:7, :66:71]
assign _io_rawOut_isNaN_T = io_a_isNaN_0 | io_b_isNaN_0; // @[MulRecFN.scala:47:7, :70:35]
assign io_rawOut_isNaN_0 = _io_rawOut_isNaN_T; // @[MulRecFN.scala:47:7, :70:35]
assign io_invalidExc = io_invalidExc_0; // @[MulRecFN.scala:47:7]
assign io_rawOut_isNaN = io_rawOut_isNaN_0; // @[MulRecFN.scala:47:7]
assign io_rawOut_isInf = io_rawOut_isInf_0; // @[MulRecFN.scala:47:7]
assign io_rawOut_isZero = io_rawOut_isZero_0; // @[MulRecFN.scala:47:7]
assign io_rawOut_sign = io_rawOut_sign_0; // @[MulRecFN.scala:47:7]
assign io_rawOut_sExp = io_rawOut_sExp_0; // @[MulRecFN.scala:47:7]
assign io_rawOut_sig = io_rawOut_sig_0; // @[MulRecFN.scala:47:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File fNFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
object fNFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits) =
{
val minNormExp = (BigInt(1)<<(expWidth - 1)) + 2
val rawIn = rawFloatFromRecFN(expWidth, sigWidth, in)
val isSubnormal = rawIn.sExp < minNormExp.S
val denormShiftDist = 1.U - rawIn.sExp(log2Up(sigWidth - 1) - 1, 0)
val denormFract = ((rawIn.sig>>1)>>denormShiftDist)(sigWidth - 2, 0)
val expOut =
Mux(isSubnormal,
0.U,
rawIn.sExp(expWidth - 1, 0) -
((BigInt(1)<<(expWidth - 1)) + 1).U
) | Fill(expWidth, rawIn.isNaN || rawIn.isInf)
val fractOut =
Mux(isSubnormal,
denormFract,
Mux(rawIn.isInf, 0.U, rawIn.sig(sigWidth - 2, 0))
)
Cat(rawIn.sign, expOut, fractOut)
}
}
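// A minimal usage sketch (not part of the original file), with hypothetical
// port names: unpacking a recoded single-precision value (33 bits) back into
// its standard 32-bit IEEE-754 encoding.
class FNFromRecF32Example extends RawModule {
  val recIn = IO(Input(UInt(33.W)))
  val ieeeOut = IO(Output(UInt(32.W)))
  ieeeOut := fNFromRecFN(8, 24, recIn)
}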
File execution-unit.scala:
//******************************************************************************
// Copyright (c) 2013 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Execution Units
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// The issue window schedules micro-ops onto a specific execution pipeline
// A given execution pipeline may contain multiple functional units; one or more
// read ports, and one or more writeports.
package boom.v3.exu
import scala.collection.mutable.{ArrayBuffer}
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.rocket.{BP}
import freechips.rocketchip.tile
import FUConstants._
import boom.v3.common._
import boom.v3.ifu.{GetPCFromFtqIO}
import boom.v3.util.{ImmGen, IsKilledByBranch, BranchKillableQueue, BoomCoreStringPrefix}
/**
* Response from Execution Unit. Bundles a MicroOp with data
*
* @param dataWidth width of the data coming from the execution unit
*/
class ExeUnitResp(val dataWidth: Int)(implicit p: Parameters) extends BoomBundle
with HasBoomUOP
{
val data = Bits(dataWidth.W)
val predicated = Bool() // Was this predicated off?
val fflags = new ValidIO(new FFlagsResp) // write fflags to ROB // TODO: Do this better
}
/**
* Floating Point flag response
*/
class FFlagsResp(implicit p: Parameters) extends BoomBundle
{
val uop = new MicroOp()
val flags = Bits(tile.FPConstants.FLAGS_SZ.W)
}
/**
* Abstract Top level Execution Unit that wraps lower level functional units to make a
* multi function execution unit.
*
 * @param readsIrf does this exe unit need an integer regfile read port
 * @param writesIrf does this exe unit need an integer regfile write port
 * @param readsFrf does this exe unit need a floating-point regfile read port
 * @param writesFrf does this exe unit need a floating-point regfile write port
 * @param writesLlIrf does this exe unit need a long-latency integer regfile write port
 * @param writesLlFrf does this exe unit need a long-latency floating-point regfile write port
* @param numBypassStages number of bypass ports for the exe unit
* @param dataWidth width of the data coming out of the exe unit
* @param bypassable is the exe unit able to be bypassed
* @param hasMem does the exe unit have a MemAddrCalcUnit
* @param hasCSR does the exe unit write to the CSRFile
 * @param hasJmpUnit does the exe unit have a jump unit
 * @param hasAlu does the exe unit have an ALU
 * @param hasFpu does the exe unit have an FPU
 * @param hasMul does the exe unit have a multiplier
 * @param hasDiv does the exe unit have a divider
 * @param hasFdiv does the exe unit have an FP divider
 * @param hasIfpu does the exe unit have an int-to-FP unit
 * @param hasFpiu does the exe unit have an FP-to-int unit
*/
abstract class ExecutionUnit(
val readsIrf : Boolean = false,
val writesIrf : Boolean = false,
val readsFrf : Boolean = false,
val writesFrf : Boolean = false,
val writesLlIrf : Boolean = false,
val writesLlFrf : Boolean = false,
val numBypassStages : Int,
val dataWidth : Int,
val bypassable : Boolean = false, // TODO make override def for code clarity
val alwaysBypassable : Boolean = false,
val hasMem : Boolean = false,
val hasCSR : Boolean = false,
val hasJmpUnit : Boolean = false,
val hasAlu : Boolean = false,
val hasFpu : Boolean = false,
val hasMul : Boolean = false,
val hasDiv : Boolean = false,
val hasFdiv : Boolean = false,
val hasIfpu : Boolean = false,
val hasFpiu : Boolean = false,
val hasRocc : Boolean = false
)(implicit p: Parameters) extends BoomModule
{
val io = IO(new Bundle {
val fu_types = Output(Bits(FUC_SZ.W))
val req = Flipped(new DecoupledIO(new FuncUnitReq(dataWidth)))
val iresp = if (writesIrf) new DecoupledIO(new ExeUnitResp(dataWidth)) else null
val fresp = if (writesFrf) new DecoupledIO(new ExeUnitResp(dataWidth)) else null
val ll_iresp = if (writesLlIrf) new DecoupledIO(new ExeUnitResp(dataWidth)) else null
val ll_fresp = if (writesLlFrf) new DecoupledIO(new ExeUnitResp(dataWidth)) else null
val bypass = Output(Vec(numBypassStages, Valid(new ExeUnitResp(dataWidth))))
val brupdate = Input(new BrUpdateInfo())
// only used by the rocc unit
val rocc = if (hasRocc) new RoCCShimCoreIO else null
// only used by the branch unit
val brinfo = if (hasAlu) Output(new BrResolutionInfo()) else null
val get_ftq_pc = if (hasJmpUnit) Flipped(new GetPCFromFtqIO()) else null
val status = Input(new freechips.rocketchip.rocket.MStatus())
// only used by the fpu unit
val fcsr_rm = if (hasFcsr) Input(Bits(tile.FPConstants.RM_SZ.W)) else null
// only used by the mem unit
val lsu_io = if (hasMem) Flipped(new boom.v3.lsu.LSUExeIO) else null
val bp = if (hasMem) Input(Vec(nBreakpoints, new BP)) else null
val mcontext = if (hasMem) Input(UInt(coreParams.mcontextWidth.W)) else null
val scontext = if (hasMem) Input(UInt(coreParams.scontextWidth.W)) else null
// TODO move this out of ExecutionUnit
val com_exception = if (hasMem || hasRocc) Input(Bool()) else null
})
io.req.ready := false.B
if (writesIrf) {
io.iresp.valid := false.B
io.iresp.bits := DontCare
io.iresp.bits.fflags.valid := false.B
io.iresp.bits.predicated := false.B
assert(io.iresp.ready)
}
if (writesLlIrf) {
io.ll_iresp.valid := false.B
io.ll_iresp.bits := DontCare
io.ll_iresp.bits.fflags.valid := false.B
io.ll_iresp.bits.predicated := false.B
}
if (writesFrf) {
io.fresp.valid := false.B
io.fresp.bits := DontCare
io.fresp.bits.fflags.valid := false.B
io.fresp.bits.predicated := false.B
assert(io.fresp.ready)
}
if (writesLlFrf) {
io.ll_fresp.valid := false.B
io.ll_fresp.bits := DontCare
io.ll_fresp.bits.fflags.valid := false.B
io.ll_fresp.bits.predicated := false.B
}
// TODO add "number of fflag ports", so we can properly account for FPU+Mem combinations
def hasFFlags : Boolean = hasFpu || hasFdiv
require ((hasFpu || hasFdiv) ^ (hasAlu || hasMul || hasMem || hasIfpu),
"[execute] we no longer support mixing FP and Integer functional units in the same exe unit.")
def hasFcsr = hasIfpu || hasFpu || hasFdiv
require (bypassable || !alwaysBypassable,
"[execute] an execution unit must be bypassable if it is always bypassable")
def supportedFuncUnits = {
new SupportedFuncUnits(
alu = hasAlu,
jmp = hasJmpUnit,
mem = hasMem,
muld = hasMul || hasDiv,
fpu = hasFpu,
csr = hasCSR,
fdiv = hasFdiv,
ifpu = hasIfpu)
}
}
/**
* ALU execution unit that can have a branch, alu, mul, div, int to FP,
* and memory unit.
*
 * @param hasJmpUnit does the exe unit have a jump unit
 * @param hasCSR does the exe unit write to the CSRFile
 * @param hasAlu does the exe unit have an ALU
 * @param hasMul does the exe unit have a multiplier
 * @param hasDiv does the exe unit have a divider
 * @param hasIfpu does the exe unit have an int-to-FP unit
* @param hasMem does the exe unit have a MemAddrCalcUnit
*/
class ALUExeUnit(
hasJmpUnit : Boolean = false,
hasCSR : Boolean = false,
hasAlu : Boolean = true,
hasMul : Boolean = false,
hasDiv : Boolean = false,
hasIfpu : Boolean = false,
hasMem : Boolean = false,
hasRocc : Boolean = false)
(implicit p: Parameters)
extends ExecutionUnit(
readsIrf = true,
writesIrf = hasAlu || hasMul || hasDiv,
writesLlIrf = hasMem || hasRocc,
writesLlFrf = (hasIfpu || hasMem) && p(tile.TileKey).core.fpu != None,
numBypassStages =
if (hasAlu && hasMul) 3 //TODO XXX p(tile.TileKey).core.imulLatency
else if (hasAlu) 1 else 0,
dataWidth = 64 + 1,
bypassable = hasAlu,
alwaysBypassable = hasAlu && !(hasMem || hasJmpUnit || hasMul || hasDiv || hasCSR || hasIfpu || hasRocc),
hasCSR = hasCSR,
hasJmpUnit = hasJmpUnit,
hasAlu = hasAlu,
hasMul = hasMul,
hasDiv = hasDiv,
hasIfpu = hasIfpu,
hasMem = hasMem,
hasRocc = hasRocc)
with freechips.rocketchip.rocket.constants.MemoryOpConstants
{
require(!(hasRocc && !hasCSR),
"RoCC needs to be shared with CSR unit")
require(!(hasMem && hasRocc),
"We do not support execution unit with both Mem and Rocc writebacks")
require(!(hasMem && hasIfpu),
"TODO. Currently do not support AluMemExeUnit with FP")
val out_str =
BoomCoreStringPrefix("==ExeUnit==") +
(if (hasAlu) BoomCoreStringPrefix(" - ALU") else "") +
(if (hasMul) BoomCoreStringPrefix(" - Mul") else "") +
(if (hasDiv) BoomCoreStringPrefix(" - Div") else "") +
(if (hasIfpu) BoomCoreStringPrefix(" - IFPU") else "") +
(if (hasMem) BoomCoreStringPrefix(" - Mem") else "") +
(if (hasRocc) BoomCoreStringPrefix(" - RoCC") else "")
override def toString: String = out_str.toString
val div_busy = WireInit(false.B)
val ifpu_busy = WireInit(false.B)
// The Functional Units --------------------
// Specifically the functional units with fast writeback to IRF
val iresp_fu_units = ArrayBuffer[FunctionalUnit]()
io.fu_types := Mux(hasAlu.B, FU_ALU, 0.U) |
Mux(hasMul.B, FU_MUL, 0.U) |
Mux(!div_busy && hasDiv.B, FU_DIV, 0.U) |
Mux(hasCSR.B, FU_CSR, 0.U) |
Mux(hasJmpUnit.B, FU_JMP, 0.U) |
Mux(!ifpu_busy && hasIfpu.B, FU_I2F, 0.U) |
Mux(hasMem.B, FU_MEM, 0.U)
// ALU Unit -------------------------------
var alu: ALUUnit = null
if (hasAlu) {
alu = Module(new ALUUnit(isJmpUnit = hasJmpUnit,
numStages = numBypassStages,
dataWidth = xLen))
alu.io.req.valid := (
io.req.valid &&
(io.req.bits.uop.fu_code === FU_ALU ||
io.req.bits.uop.fu_code === FU_JMP ||
(io.req.bits.uop.fu_code === FU_CSR && io.req.bits.uop.uopc =/= uopROCC)))
//ROCC Rocc Commands are taken by the RoCC unit
alu.io.req.bits.uop := io.req.bits.uop
alu.io.req.bits.kill := io.req.bits.kill
alu.io.req.bits.rs1_data := io.req.bits.rs1_data
alu.io.req.bits.rs2_data := io.req.bits.rs2_data
alu.io.req.bits.rs3_data := DontCare
alu.io.req.bits.pred_data := io.req.bits.pred_data
alu.io.resp.ready := DontCare
alu.io.brupdate := io.brupdate
iresp_fu_units += alu
// Bypassing only applies to ALU
io.bypass := alu.io.bypass
// branch unit is embedded inside the ALU
io.brinfo := alu.io.brinfo
if (hasJmpUnit) {
alu.io.get_ftq_pc <> io.get_ftq_pc
}
}
var rocc: RoCCShim = null
if (hasRocc) {
rocc = Module(new RoCCShim)
rocc.io.req.valid := io.req.valid && io.req.bits.uop.uopc === uopROCC
rocc.io.req.bits := DontCare
rocc.io.req.bits.uop := io.req.bits.uop
rocc.io.req.bits.kill := io.req.bits.kill
rocc.io.req.bits.rs1_data := io.req.bits.rs1_data
rocc.io.req.bits.rs2_data := io.req.bits.rs2_data
rocc.io.brupdate := io.brupdate // We should assert on this somewhere
rocc.io.status := io.status
rocc.io.exception := io.com_exception
io.rocc <> rocc.io.core
rocc.io.resp.ready := io.ll_iresp.ready
io.ll_iresp.valid := rocc.io.resp.valid
io.ll_iresp.bits.uop := rocc.io.resp.bits.uop
io.ll_iresp.bits.data := rocc.io.resp.bits.data
}
// Pipelined, IMul Unit ------------------
var imul: PipelinedMulUnit = null
if (hasMul) {
imul = Module(new PipelinedMulUnit(imulLatency, xLen))
imul.io <> DontCare
imul.io.req.valid := io.req.valid && io.req.bits.uop.fu_code_is(FU_MUL)
imul.io.req.bits.uop := io.req.bits.uop
imul.io.req.bits.rs1_data := io.req.bits.rs1_data
imul.io.req.bits.rs2_data := io.req.bits.rs2_data
imul.io.req.bits.kill := io.req.bits.kill
imul.io.brupdate := io.brupdate
iresp_fu_units += imul
}
var ifpu: IntToFPUnit = null
if (hasIfpu) {
ifpu = Module(new IntToFPUnit(latency=intToFpLatency))
ifpu.io.req <> io.req
ifpu.io.req.valid := io.req.valid && io.req.bits.uop.fu_code_is(FU_I2F)
ifpu.io.fcsr_rm := io.fcsr_rm
ifpu.io.brupdate <> io.brupdate
ifpu.io.resp.ready := DontCare
// buffer up results since we share write-port on integer regfile.
val queue = Module(new BranchKillableQueue(new ExeUnitResp(dataWidth),
entries = intToFpLatency + 3)) // TODO being overly conservative
queue.io.enq.valid := ifpu.io.resp.valid
queue.io.enq.bits.uop := ifpu.io.resp.bits.uop
queue.io.enq.bits.data := ifpu.io.resp.bits.data
queue.io.enq.bits.predicated := ifpu.io.resp.bits.predicated
queue.io.enq.bits.fflags := ifpu.io.resp.bits.fflags
queue.io.brupdate := io.brupdate
queue.io.flush := io.req.bits.kill
io.ll_fresp <> queue.io.deq
ifpu_busy := !(queue.io.empty)
assert (queue.io.enq.ready)
}
// Div/Rem Unit -----------------------
var div: DivUnit = null
val div_resp_val = WireInit(false.B)
if (hasDiv) {
div = Module(new DivUnit(xLen))
div.io <> DontCare
div.io.req.valid := io.req.valid && io.req.bits.uop.fu_code_is(FU_DIV) && hasDiv.B
div.io.req.bits.uop := io.req.bits.uop
div.io.req.bits.rs1_data := io.req.bits.rs1_data
div.io.req.bits.rs2_data := io.req.bits.rs2_data
div.io.brupdate := io.brupdate
div.io.req.bits.kill := io.req.bits.kill
// share write port with the pipelined units
div.io.resp.ready := !(iresp_fu_units.map(_.io.resp.valid).reduce(_|_))
div_resp_val := div.io.resp.valid
div_busy := !div.io.req.ready ||
(io.req.valid && io.req.bits.uop.fu_code_is(FU_DIV))
iresp_fu_units += div
}
// Mem Unit --------------------------
if (hasMem) {
require(!hasAlu)
val maddrcalc = Module(new MemAddrCalcUnit)
maddrcalc.io.req <> io.req
maddrcalc.io.req.valid := io.req.valid && io.req.bits.uop.fu_code_is(FU_MEM)
maddrcalc.io.brupdate <> io.brupdate
maddrcalc.io.status := io.status
maddrcalc.io.bp := io.bp
maddrcalc.io.mcontext := io.mcontext
maddrcalc.io.scontext := io.scontext
maddrcalc.io.resp.ready := DontCare
require(numBypassStages == 0)
io.lsu_io.req := maddrcalc.io.resp
io.ll_iresp <> io.lsu_io.iresp
if (usingFPU) {
io.ll_fresp <> io.lsu_io.fresp
}
}
// Outputs (Write Port #0) ---------------
if (writesIrf) {
io.iresp.valid := iresp_fu_units.map(_.io.resp.valid).reduce(_|_)
io.iresp.bits.uop := PriorityMux(iresp_fu_units.map(f =>
(f.io.resp.valid, f.io.resp.bits.uop)).toSeq)
io.iresp.bits.data := PriorityMux(iresp_fu_units.map(f =>
(f.io.resp.valid, f.io.resp.bits.data)).toSeq)
io.iresp.bits.predicated := PriorityMux(iresp_fu_units.map(f =>
(f.io.resp.valid, f.io.resp.bits.predicated)).toSeq)
// pulled out for critical path reasons
// TODO: Does this make sense as part of the iresp bundle?
if (hasAlu) {
io.iresp.bits.uop.csr_addr := ImmGen(alu.io.resp.bits.uop.imm_packed, IS_I).asUInt
io.iresp.bits.uop.ctrl.csr_cmd := alu.io.resp.bits.uop.ctrl.csr_cmd
}
}
assert ((PopCount(iresp_fu_units.map(_.io.resp.valid)) <= 1.U && !div_resp_val) ||
(PopCount(iresp_fu_units.map(_.io.resp.valid)) <= 2.U && (div_resp_val)),
"Multiple functional units are fighting over the write port.")
}
/**
* FPU-only unit, with optional second write-port for ToInt micro-ops.
*
* @param hasFpu does the exe unit have a fpu
* @param hasFdiv does the exe unit have a FP divider
* @param hasFpiu does the exe unit have a FP to int unit
*/
class FPUExeUnit(
hasFpu : Boolean = true,
hasFdiv : Boolean = false,
hasFpiu : Boolean = false
)
(implicit p: Parameters)
extends ExecutionUnit(
readsFrf = true,
writesFrf = true,
writesLlIrf = hasFpiu,
writesIrf = false,
numBypassStages = 0,
dataWidth = p(tile.TileKey).core.fpu.get.fLen + 1,
bypassable = false,
hasFpu = hasFpu,
hasFdiv = hasFdiv,
hasFpiu = hasFpiu) with tile.HasFPUParameters
{
val out_str =
BoomCoreStringPrefix("==ExeUnit==") +
(if (hasFpu) BoomCoreStringPrefix("- FPU (Latency: " + dfmaLatency + ")") else "") +
(if (hasFdiv) BoomCoreStringPrefix("- FDiv/FSqrt") else "") +
(if (hasFpiu) BoomCoreStringPrefix("- FPIU (writes to Integer RF)") else "")
val fdiv_busy = WireInit(false.B)
val fpiu_busy = WireInit(false.B)
// The Functional Units --------------------
val fu_units = ArrayBuffer[FunctionalUnit]()
io.fu_types := Mux(hasFpu.B, FU_FPU, 0.U) |
Mux(!fdiv_busy && hasFdiv.B, FU_FDV, 0.U) |
Mux(!fpiu_busy && hasFpiu.B, FU_F2I, 0.U)
// FPU Unit -----------------------
var fpu: FPUUnit = null
val fpu_resp_val = WireInit(false.B)
val fpu_resp_fflags = Wire(new ValidIO(new FFlagsResp()))
fpu_resp_fflags.valid := false.B
if (hasFpu) {
fpu = Module(new FPUUnit())
fpu.io.req.valid := io.req.valid &&
(io.req.bits.uop.fu_code_is(FU_FPU) ||
io.req.bits.uop.fu_code_is(FU_F2I)) // TODO move to using a separate unit
fpu.io.req.bits.uop := io.req.bits.uop
fpu.io.req.bits.rs1_data := io.req.bits.rs1_data
fpu.io.req.bits.rs2_data := io.req.bits.rs2_data
fpu.io.req.bits.rs3_data := io.req.bits.rs3_data
fpu.io.req.bits.pred_data := false.B
fpu.io.req.bits.kill := io.req.bits.kill
fpu.io.fcsr_rm := io.fcsr_rm
fpu.io.brupdate := io.brupdate
fpu.io.resp.ready := DontCare
fpu_resp_val := fpu.io.resp.valid
fpu_resp_fflags := fpu.io.resp.bits.fflags
fu_units += fpu
}
// FDiv/FSqrt Unit -----------------------
var fdivsqrt: FDivSqrtUnit = null
val fdiv_resp_fflags = Wire(new ValidIO(new FFlagsResp()))
fdiv_resp_fflags := DontCare
fdiv_resp_fflags.valid := false.B
if (hasFdiv) {
fdivsqrt = Module(new FDivSqrtUnit())
fdivsqrt.io.req.valid := io.req.valid && io.req.bits.uop.fu_code_is(FU_FDV)
fdivsqrt.io.req.bits.uop := io.req.bits.uop
fdivsqrt.io.req.bits.rs1_data := io.req.bits.rs1_data
fdivsqrt.io.req.bits.rs2_data := io.req.bits.rs2_data
fdivsqrt.io.req.bits.rs3_data := DontCare
fdivsqrt.io.req.bits.pred_data := false.B
fdivsqrt.io.req.bits.kill := io.req.bits.kill
fdivsqrt.io.fcsr_rm := io.fcsr_rm
fdivsqrt.io.brupdate := io.brupdate
// share write port with the pipelined units
fdivsqrt.io.resp.ready := !(fu_units.map(_.io.resp.valid).reduce(_|_)) // TODO PERF will get blocked by fpiu.
fdiv_busy := !fdivsqrt.io.req.ready || (io.req.valid && io.req.bits.uop.fu_code_is(FU_FDV))
fdiv_resp_fflags := fdivsqrt.io.resp.bits.fflags
fu_units += fdivsqrt
}
// Outputs (Write Port #0) ---------------
io.fresp.valid := fu_units.map(_.io.resp.valid).reduce(_|_) &&
!(fpu.io.resp.valid && fpu.io.resp.bits.uop.fu_code_is(FU_F2I))
io.fresp.bits.uop := PriorityMux(fu_units.map(f => (f.io.resp.valid,
f.io.resp.bits.uop)).toSeq)
io.fresp.bits.data:= PriorityMux(fu_units.map(f => (f.io.resp.valid, f.io.resp.bits.data)).toSeq)
io.fresp.bits.fflags := Mux(fpu_resp_val, fpu_resp_fflags, fdiv_resp_fflags)
// Outputs (Write Port #1) -- FpToInt Queuing Unit -----------------------
if (hasFpiu) {
// TODO instantiate our own fpiu; and remove it from fpu.scala.
// buffer up results since we share write-port on integer regfile.
val queue = Module(new BranchKillableQueue(new ExeUnitResp(dataWidth),
entries = dfmaLatency + 3)) // TODO being overly conservative
queue.io.enq.valid := (fpu.io.resp.valid &&
fpu.io.resp.bits.uop.fu_code_is(FU_F2I) &&
fpu.io.resp.bits.uop.uopc =/= uopSTA) // STA means store data gen for floating point
queue.io.enq.bits.uop := fpu.io.resp.bits.uop
queue.io.enq.bits.data := fpu.io.resp.bits.data
queue.io.enq.bits.predicated := fpu.io.resp.bits.predicated
queue.io.enq.bits.fflags := fpu.io.resp.bits.fflags
queue.io.brupdate := io.brupdate
queue.io.flush := io.req.bits.kill
assert (queue.io.enq.ready) // If this backs up, we've miscalculated the size of the queue.
val fp_sdq = Module(new BranchKillableQueue(new ExeUnitResp(dataWidth),
entries = 3)) // Lets us backpressure floating point store data
fp_sdq.io.enq.valid := io.req.valid && io.req.bits.uop.uopc === uopSTA && !IsKilledByBranch(io.brupdate, io.req.bits.uop)
fp_sdq.io.enq.bits.uop := io.req.bits.uop
fp_sdq.io.enq.bits.data := ieee(io.req.bits.rs2_data)
fp_sdq.io.enq.bits.predicated := false.B
fp_sdq.io.enq.bits.fflags := DontCare
fp_sdq.io.brupdate := io.brupdate
fp_sdq.io.flush := io.req.bits.kill
assert(!(fp_sdq.io.enq.valid && !fp_sdq.io.enq.ready))
val resp_arb = Module(new Arbiter(new ExeUnitResp(dataWidth), 2))
resp_arb.io.in(0) <> queue.io.deq
resp_arb.io.in(1) <> fp_sdq.io.deq
io.ll_iresp <> resp_arb.io.out
fpiu_busy := !(queue.io.empty && fp_sdq.io.empty)
}
override def toString: String = out_str.toString
}
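// A minimal, self-contained sketch (not part of the BOOM sources above) of how
// the fu_types mask published by units such as ALUExeUnit and FPUExeUnit is
// typically consumed: issue-select logic ANDs a micro-op's fu_code against the
// unit's advertised fu_types, mirroring MicroOp.fu_code_is in the next file.
// The names FuMatchSketch and canIssueTo are hypothetical.
import chisel3._

object FuMatchSketch {
  // True when the unit currently advertises at least one of the functional
  // units this micro-op needs (both values are bit masks over FU kinds).
  def canIssueTo(uopFuCode: UInt, unitFuTypes: UInt): Bool =
    (uopFuCode & unitFuTypes) =/= 0.U
}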
File micro-op.scala:
//******************************************************************************
// Copyright (c) 2015 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// MicroOp
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.common
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import boom.v3.exu.FUConstants
/**
* Extension to BoomBundle to add a MicroOp
*/
abstract trait HasBoomUOP extends BoomBundle
{
val uop = new MicroOp()
}
/**
* MicroOp passing through the pipeline
*/
class MicroOp(implicit p: Parameters) extends BoomBundle
with freechips.rocketchip.rocket.constants.MemoryOpConstants
with freechips.rocketchip.rocket.constants.ScalarOpConstants
{
val uopc = UInt(UOPC_SZ.W) // micro-op code
val inst = UInt(32.W)
val debug_inst = UInt(32.W)
val is_rvc = Bool()
val debug_pc = UInt(coreMaxAddrBits.W)
val iq_type = UInt(IQT_SZ.W) // which issue unit do we use?
val fu_code = UInt(FUConstants.FUC_SZ.W) // which functional unit do we use?
val ctrl = new CtrlSignals
// What is the next state of this uop in the issue window? useful
// for the compacting queue.
val iw_state = UInt(2.W)
// Has operand 1 or 2 been woken up speculatively by a load?
// Only integer operands are speculatively woken up,
// so we can ignore p3.
val iw_p1_poisoned = Bool()
val iw_p2_poisoned = Bool()
val is_br = Bool() // is this micro-op a branch (vs a regular PC+4 inst)?
val is_jalr = Bool() // is this a jump? (jal or jalr)
val is_jal = Bool() // is this a JAL (doesn't include JR)? used for branch unit
val is_sfb = Bool() // is this a sfb or in the shadow of a sfb
val br_mask = UInt(maxBrCount.W) // which branches are we being speculated under?
val br_tag = UInt(brTagSz.W)
// Index into FTQ to figure out our fetch PC.
val ftq_idx = UInt(log2Ceil(ftqSz).W)
// This inst straddles two fetch packets
val edge_inst = Bool()
// Low-order bits of our own PC. Combine with ftq[ftq_idx] to get PC.
// Aligned to a cache-line size, as that is the greater fetch granularity.
// TODO: Shouldn't this be aligned to fetch-width size?
val pc_lob = UInt(log2Ceil(icBlockBytes).W)
// Was this a branch that was predicted taken?
val taken = Bool()
val imm_packed = UInt(LONGEST_IMM_SZ.W) // densely pack the imm in decode...
// then translate and sign-extend in execute
val csr_addr = UInt(CSR_ADDR_SZ.W) // only used for critical path reasons in Exe
val rob_idx = UInt(robAddrSz.W)
val ldq_idx = UInt(ldqAddrSz.W)
val stq_idx = UInt(stqAddrSz.W)
val rxq_idx = UInt(log2Ceil(numRxqEntries).W)
val pdst = UInt(maxPregSz.W)
val prs1 = UInt(maxPregSz.W)
val prs2 = UInt(maxPregSz.W)
val prs3 = UInt(maxPregSz.W)
val ppred = UInt(log2Ceil(ftqSz).W)
val prs1_busy = Bool()
val prs2_busy = Bool()
val prs3_busy = Bool()
val ppred_busy = Bool()
val stale_pdst = UInt(maxPregSz.W)
val exception = Bool()
val exc_cause = UInt(xLen.W) // TODO compress this down, xlen is insanity
val bypassable = Bool() // can we bypass ALU results? (doesn't include loads, csr, etc...)
val mem_cmd = UInt(M_SZ.W) // sync primitives/cache flushes
val mem_size = UInt(2.W)
val mem_signed = Bool()
val is_fence = Bool()
val is_fencei = Bool()
val is_amo = Bool()
val uses_ldq = Bool()
val uses_stq = Bool()
val is_sys_pc2epc = Bool() // Is an ECall or Breakpoint -- both set EPC to PC.
val is_unique = Bool() // only allow this instruction in the pipeline, wait for STQ to
// drain, clear fetch after it (tell ROB to un-ready until empty)
val flush_on_commit = Bool() // some instructions need to flush the pipeline behind them
// Predication
def is_sfb_br = is_br && is_sfb && enableSFBOpt.B // Does this write a predicate
def is_sfb_shadow = !is_br && is_sfb && enableSFBOpt.B // Is this predicated
val ldst_is_rs1 = Bool() // If this is set and we are predicated off, copy rs1 to dst,
// else copy rs2 to dst
// logical specifiers (only used in Decode->Rename), except rollback (ldst)
val ldst = UInt(lregSz.W)
val lrs1 = UInt(lregSz.W)
val lrs2 = UInt(lregSz.W)
val lrs3 = UInt(lregSz.W)
val ldst_val = Bool() // is there a destination? invalid for stores, rd==x0, etc.
val dst_rtype = UInt(2.W)
val lrs1_rtype = UInt(2.W)
val lrs2_rtype = UInt(2.W)
val frs3_en = Bool()
// floating point information
val fp_val = Bool() // is a floating-point instruction (F- or D-extension)?
// If it's non-ld/st it will write back exception bits to the fcsr.
val fp_single = Bool() // single-precision floating point instruction (F-extension)
// frontend exception information
val xcpt_pf_if = Bool() // I-TLB page fault.
val xcpt_ae_if = Bool() // I$ access exception.
val xcpt_ma_if = Bool() // Misaligned fetch (jal/br jumping to a misaligned addr).
val bp_debug_if = Bool() // Breakpoint
val bp_xcpt_if = Bool() // Breakpoint
// What prediction structure provides the prediction FROM this op
val debug_fsrc = UInt(BSRC_SZ.W)
// What prediction structure provides the prediction TO this op
val debug_tsrc = UInt(BSRC_SZ.W)
// Do we allocate a branch tag for this?
// SFB branches don't get a mask, they get a predicate bit
def allocate_brtag = (is_br && !is_sfb) || is_jalr
// Does this register write-back
def rf_wen = dst_rtype =/= RT_X
// Is it possible for this uop to misspeculate, preventing the commit of subsequent uops?
def unsafe = uses_ldq || (uses_stq && !is_fence) || is_br || is_jalr
def fu_code_is(_fu: UInt) = (fu_code & _fu) =/= 0.U
}
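// A minimal sketch (hypothetical object and method names) of how the br_mask
// field above is consumed. BOOM's own IsKilledByBranch helper, used by the
// execution units earlier in this document, performs the same overlap test.
import chisel3._

object BrMaskSketch {
  // Squash check: any overlap between a uop's speculation mask and the set of
  // branches that just resolved as mispredicted kills the uop.
  def killedBy(brMask: UInt, mispredictMask: UInt): Bool =
    (brMask & mispredictMask) =/= 0.U
  // Mask maintenance: bits for branches that resolved correctly this cycle are
  // cleared, since the uop is no longer speculative with respect to them.
  def nextBrMask(brMask: UInt, resolveMask: UInt): UInt =
    brMask & ~resolveMask
}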
/**
* Control signals within a MicroOp
*
* TODO REFACTOR this, as this should no longer be true, as bypass occurs in stage before branch resolution
*/
class CtrlSignals extends Bundle()
{
val br_type = UInt(BR_N.getWidth.W)
val op1_sel = UInt(OP1_X.getWidth.W)
val op2_sel = UInt(OP2_X.getWidth.W)
val imm_sel = UInt(IS_X.getWidth.W)
val op_fcn = UInt(freechips.rocketchip.rocket.ALU.SZ_ALU_FN.W)
val fcn_dw = Bool()
val csr_cmd = UInt(freechips.rocketchip.rocket.CSR.SZ.W)
val is_load = Bool() // will invoke TLB address lookup
val is_sta = Bool() // will invoke TLB address lookup
val is_std = Bool()
}
File rawFloatFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
/*----------------------------------------------------------------------------
| In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be
| set.
*----------------------------------------------------------------------------*/
object rawFloatFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat =
{
val exp = in(expWidth + sigWidth - 1, sigWidth - 1)
val isZero = exp(expWidth, expWidth - 2) === 0.U
val isSpecial = exp(expWidth, expWidth - 1) === 3.U
val out = Wire(new RawFloat(expWidth, sigWidth))
out.isNaN := isSpecial && exp(expWidth - 2)
out.isInf := isSpecial && ! exp(expWidth - 2)
out.isZero := isZero
out.sign := in(expWidth + sigWidth)
out.sExp := exp.zext
out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0)
out
}
}
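// A short usage sketch, assuming a double-precision recoded operand (65 bits,
// expWidth = 11, sigWidth = 53, as produced elsewhere by hardfloat's
// recFNFromFN). RawUnpackSketch and its port names are hypothetical; the
// sketch only shows how the isNaN/isInf/isZero classification described in
// the comment above is exposed. It is written as if placed in the hardfloat
// package scope, so rawFloatFromRecFN resolves unqualified.
import chisel3._

class RawUnpackSketch extends Module {
  val io = IO(new Bundle {
    val recD   = Input(UInt(65.W))   // recoded double-precision value
    val isNaN  = Output(Bool())
    val isZero = Output(Bool())
    val sign   = Output(Bool())
  })
  val raw = rawFloatFromRecFN(11, 53, io.recD)
  io.isNaN  := raw.isNaN
  io.isZero := raw.isZero
  io.sign   := raw.sign
}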
module FPUExeUnit( // @[execution-unit.scala:437:7]
input clock, // @[execution-unit.scala:437:7]
input reset, // @[execution-unit.scala:437:7]
output [9:0] io_fu_types, // @[execution-unit.scala:104:14]
input io_req_valid, // @[execution-unit.scala:104:14]
input [6:0] io_req_bits_uop_uopc, // @[execution-unit.scala:104:14]
input [31:0] io_req_bits_uop_inst, // @[execution-unit.scala:104:14]
input [31:0] io_req_bits_uop_debug_inst, // @[execution-unit.scala:104:14]
input io_req_bits_uop_is_rvc, // @[execution-unit.scala:104:14]
input [39:0] io_req_bits_uop_debug_pc, // @[execution-unit.scala:104:14]
input [2:0] io_req_bits_uop_iq_type, // @[execution-unit.scala:104:14]
input [9:0] io_req_bits_uop_fu_code, // @[execution-unit.scala:104:14]
input [3:0] io_req_bits_uop_ctrl_br_type, // @[execution-unit.scala:104:14]
input [1:0] io_req_bits_uop_ctrl_op1_sel, // @[execution-unit.scala:104:14]
input [2:0] io_req_bits_uop_ctrl_op2_sel, // @[execution-unit.scala:104:14]
input [2:0] io_req_bits_uop_ctrl_imm_sel, // @[execution-unit.scala:104:14]
input [4:0] io_req_bits_uop_ctrl_op_fcn, // @[execution-unit.scala:104:14]
input io_req_bits_uop_ctrl_fcn_dw, // @[execution-unit.scala:104:14]
input [2:0] io_req_bits_uop_ctrl_csr_cmd, // @[execution-unit.scala:104:14]
input io_req_bits_uop_ctrl_is_load, // @[execution-unit.scala:104:14]
input io_req_bits_uop_ctrl_is_sta, // @[execution-unit.scala:104:14]
input io_req_bits_uop_ctrl_is_std, // @[execution-unit.scala:104:14]
input [1:0] io_req_bits_uop_iw_state, // @[execution-unit.scala:104:14]
input io_req_bits_uop_iw_p1_poisoned, // @[execution-unit.scala:104:14]
input io_req_bits_uop_iw_p2_poisoned, // @[execution-unit.scala:104:14]
input io_req_bits_uop_is_br, // @[execution-unit.scala:104:14]
input io_req_bits_uop_is_jalr, // @[execution-unit.scala:104:14]
input io_req_bits_uop_is_jal, // @[execution-unit.scala:104:14]
input io_req_bits_uop_is_sfb, // @[execution-unit.scala:104:14]
input [15:0] io_req_bits_uop_br_mask, // @[execution-unit.scala:104:14]
input [3:0] io_req_bits_uop_br_tag, // @[execution-unit.scala:104:14]
input [4:0] io_req_bits_uop_ftq_idx, // @[execution-unit.scala:104:14]
input io_req_bits_uop_edge_inst, // @[execution-unit.scala:104:14]
input [5:0] io_req_bits_uop_pc_lob, // @[execution-unit.scala:104:14]
input io_req_bits_uop_taken, // @[execution-unit.scala:104:14]
input [19:0] io_req_bits_uop_imm_packed, // @[execution-unit.scala:104:14]
input [11:0] io_req_bits_uop_csr_addr, // @[execution-unit.scala:104:14]
input [6:0] io_req_bits_uop_rob_idx, // @[execution-unit.scala:104:14]
input [4:0] io_req_bits_uop_ldq_idx, // @[execution-unit.scala:104:14]
input [4:0] io_req_bits_uop_stq_idx, // @[execution-unit.scala:104:14]
input [1:0] io_req_bits_uop_rxq_idx, // @[execution-unit.scala:104:14]
input [6:0] io_req_bits_uop_pdst, // @[execution-unit.scala:104:14]
input [6:0] io_req_bits_uop_prs1, // @[execution-unit.scala:104:14]
input [6:0] io_req_bits_uop_prs2, // @[execution-unit.scala:104:14]
input [6:0] io_req_bits_uop_prs3, // @[execution-unit.scala:104:14]
input [4:0] io_req_bits_uop_ppred, // @[execution-unit.scala:104:14]
input io_req_bits_uop_prs1_busy, // @[execution-unit.scala:104:14]
input io_req_bits_uop_prs2_busy, // @[execution-unit.scala:104:14]
input io_req_bits_uop_prs3_busy, // @[execution-unit.scala:104:14]
input io_req_bits_uop_ppred_busy, // @[execution-unit.scala:104:14]
input [6:0] io_req_bits_uop_stale_pdst, // @[execution-unit.scala:104:14]
input io_req_bits_uop_exception, // @[execution-unit.scala:104:14]
input [63:0] io_req_bits_uop_exc_cause, // @[execution-unit.scala:104:14]
input io_req_bits_uop_bypassable, // @[execution-unit.scala:104:14]
input [4:0] io_req_bits_uop_mem_cmd, // @[execution-unit.scala:104:14]
input [1:0] io_req_bits_uop_mem_size, // @[execution-unit.scala:104:14]
input io_req_bits_uop_mem_signed, // @[execution-unit.scala:104:14]
input io_req_bits_uop_is_fence, // @[execution-unit.scala:104:14]
input io_req_bits_uop_is_fencei, // @[execution-unit.scala:104:14]
input io_req_bits_uop_is_amo, // @[execution-unit.scala:104:14]
input io_req_bits_uop_uses_ldq, // @[execution-unit.scala:104:14]
input io_req_bits_uop_uses_stq, // @[execution-unit.scala:104:14]
input io_req_bits_uop_is_sys_pc2epc, // @[execution-unit.scala:104:14]
input io_req_bits_uop_is_unique, // @[execution-unit.scala:104:14]
input io_req_bits_uop_flush_on_commit, // @[execution-unit.scala:104:14]
input io_req_bits_uop_ldst_is_rs1, // @[execution-unit.scala:104:14]
input [5:0] io_req_bits_uop_ldst, // @[execution-unit.scala:104:14]
input [5:0] io_req_bits_uop_lrs1, // @[execution-unit.scala:104:14]
input [5:0] io_req_bits_uop_lrs2, // @[execution-unit.scala:104:14]
input [5:0] io_req_bits_uop_lrs3, // @[execution-unit.scala:104:14]
input io_req_bits_uop_ldst_val, // @[execution-unit.scala:104:14]
input [1:0] io_req_bits_uop_dst_rtype, // @[execution-unit.scala:104:14]
input [1:0] io_req_bits_uop_lrs1_rtype, // @[execution-unit.scala:104:14]
input [1:0] io_req_bits_uop_lrs2_rtype, // @[execution-unit.scala:104:14]
input io_req_bits_uop_frs3_en, // @[execution-unit.scala:104:14]
input io_req_bits_uop_fp_val, // @[execution-unit.scala:104:14]
input io_req_bits_uop_fp_single, // @[execution-unit.scala:104:14]
input io_req_bits_uop_xcpt_pf_if, // @[execution-unit.scala:104:14]
input io_req_bits_uop_xcpt_ae_if, // @[execution-unit.scala:104:14]
input io_req_bits_uop_xcpt_ma_if, // @[execution-unit.scala:104:14]
input io_req_bits_uop_bp_debug_if, // @[execution-unit.scala:104:14]
input io_req_bits_uop_bp_xcpt_if, // @[execution-unit.scala:104:14]
input [1:0] io_req_bits_uop_debug_fsrc, // @[execution-unit.scala:104:14]
input [1:0] io_req_bits_uop_debug_tsrc, // @[execution-unit.scala:104:14]
input [64:0] io_req_bits_rs1_data, // @[execution-unit.scala:104:14]
input [64:0] io_req_bits_rs2_data, // @[execution-unit.scala:104:14]
input [64:0] io_req_bits_rs3_data, // @[execution-unit.scala:104:14]
input io_req_bits_kill, // @[execution-unit.scala:104:14]
output io_fresp_valid, // @[execution-unit.scala:104:14]
output [6:0] io_fresp_bits_uop_uopc, // @[execution-unit.scala:104:14]
output [31:0] io_fresp_bits_uop_inst, // @[execution-unit.scala:104:14]
output [31:0] io_fresp_bits_uop_debug_inst, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_is_rvc, // @[execution-unit.scala:104:14]
output [39:0] io_fresp_bits_uop_debug_pc, // @[execution-unit.scala:104:14]
output [2:0] io_fresp_bits_uop_iq_type, // @[execution-unit.scala:104:14]
output [9:0] io_fresp_bits_uop_fu_code, // @[execution-unit.scala:104:14]
output [3:0] io_fresp_bits_uop_ctrl_br_type, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_uop_ctrl_op1_sel, // @[execution-unit.scala:104:14]
output [2:0] io_fresp_bits_uop_ctrl_op2_sel, // @[execution-unit.scala:104:14]
output [2:0] io_fresp_bits_uop_ctrl_imm_sel, // @[execution-unit.scala:104:14]
output [4:0] io_fresp_bits_uop_ctrl_op_fcn, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_ctrl_fcn_dw, // @[execution-unit.scala:104:14]
output [2:0] io_fresp_bits_uop_ctrl_csr_cmd, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_ctrl_is_load, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_ctrl_is_sta, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_ctrl_is_std, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_uop_iw_state, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_iw_p1_poisoned, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_iw_p2_poisoned, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_is_br, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_is_jalr, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_is_jal, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_is_sfb, // @[execution-unit.scala:104:14]
output [15:0] io_fresp_bits_uop_br_mask, // @[execution-unit.scala:104:14]
output [3:0] io_fresp_bits_uop_br_tag, // @[execution-unit.scala:104:14]
output [4:0] io_fresp_bits_uop_ftq_idx, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_edge_inst, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_uop_pc_lob, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_taken, // @[execution-unit.scala:104:14]
output [19:0] io_fresp_bits_uop_imm_packed, // @[execution-unit.scala:104:14]
output [11:0] io_fresp_bits_uop_csr_addr, // @[execution-unit.scala:104:14]
output [6:0] io_fresp_bits_uop_rob_idx, // @[execution-unit.scala:104:14]
output [4:0] io_fresp_bits_uop_ldq_idx, // @[execution-unit.scala:104:14]
output [4:0] io_fresp_bits_uop_stq_idx, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_uop_rxq_idx, // @[execution-unit.scala:104:14]
output [6:0] io_fresp_bits_uop_pdst, // @[execution-unit.scala:104:14]
output [6:0] io_fresp_bits_uop_prs1, // @[execution-unit.scala:104:14]
output [6:0] io_fresp_bits_uop_prs2, // @[execution-unit.scala:104:14]
output [6:0] io_fresp_bits_uop_prs3, // @[execution-unit.scala:104:14]
output [4:0] io_fresp_bits_uop_ppred, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_prs1_busy, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_prs2_busy, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_prs3_busy, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_ppred_busy, // @[execution-unit.scala:104:14]
output [6:0] io_fresp_bits_uop_stale_pdst, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_exception, // @[execution-unit.scala:104:14]
output [63:0] io_fresp_bits_uop_exc_cause, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_bypassable, // @[execution-unit.scala:104:14]
output [4:0] io_fresp_bits_uop_mem_cmd, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_uop_mem_size, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_mem_signed, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_is_fence, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_is_fencei, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_is_amo, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_uses_ldq, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_uses_stq, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_is_sys_pc2epc, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_is_unique, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_flush_on_commit, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_ldst_is_rs1, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_uop_ldst, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_uop_lrs1, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_uop_lrs2, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_uop_lrs3, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_ldst_val, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_uop_dst_rtype, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_uop_lrs1_rtype, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_uop_lrs2_rtype, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_frs3_en, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_fp_val, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_fp_single, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_xcpt_pf_if, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_xcpt_ae_if, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_xcpt_ma_if, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_bp_debug_if, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_bp_xcpt_if, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_uop_debug_fsrc, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_uop_debug_tsrc, // @[execution-unit.scala:104:14]
output [64:0] io_fresp_bits_data, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_valid, // @[execution-unit.scala:104:14]
output [6:0] io_fresp_bits_fflags_bits_uop_uopc, // @[execution-unit.scala:104:14]
output [31:0] io_fresp_bits_fflags_bits_uop_inst, // @[execution-unit.scala:104:14]
output [31:0] io_fresp_bits_fflags_bits_uop_debug_inst, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_is_rvc, // @[execution-unit.scala:104:14]
output [39:0] io_fresp_bits_fflags_bits_uop_debug_pc, // @[execution-unit.scala:104:14]
output [2:0] io_fresp_bits_fflags_bits_uop_iq_type, // @[execution-unit.scala:104:14]
output [9:0] io_fresp_bits_fflags_bits_uop_fu_code, // @[execution-unit.scala:104:14]
output [3:0] io_fresp_bits_fflags_bits_uop_ctrl_br_type, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_fflags_bits_uop_ctrl_op1_sel, // @[execution-unit.scala:104:14]
output [2:0] io_fresp_bits_fflags_bits_uop_ctrl_op2_sel, // @[execution-unit.scala:104:14]
output [2:0] io_fresp_bits_fflags_bits_uop_ctrl_imm_sel, // @[execution-unit.scala:104:14]
output [4:0] io_fresp_bits_fflags_bits_uop_ctrl_op_fcn, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_ctrl_fcn_dw, // @[execution-unit.scala:104:14]
output [2:0] io_fresp_bits_fflags_bits_uop_ctrl_csr_cmd, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_ctrl_is_load, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_ctrl_is_sta, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_ctrl_is_std, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_fflags_bits_uop_iw_state, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_iw_p1_poisoned, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_iw_p2_poisoned, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_is_br, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_is_jalr, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_is_jal, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_is_sfb, // @[execution-unit.scala:104:14]
output [15:0] io_fresp_bits_fflags_bits_uop_br_mask, // @[execution-unit.scala:104:14]
output [3:0] io_fresp_bits_fflags_bits_uop_br_tag, // @[execution-unit.scala:104:14]
output [4:0] io_fresp_bits_fflags_bits_uop_ftq_idx, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_edge_inst, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_fflags_bits_uop_pc_lob, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_taken, // @[execution-unit.scala:104:14]
output [19:0] io_fresp_bits_fflags_bits_uop_imm_packed, // @[execution-unit.scala:104:14]
output [11:0] io_fresp_bits_fflags_bits_uop_csr_addr, // @[execution-unit.scala:104:14]
output [6:0] io_fresp_bits_fflags_bits_uop_rob_idx, // @[execution-unit.scala:104:14]
output [4:0] io_fresp_bits_fflags_bits_uop_ldq_idx, // @[execution-unit.scala:104:14]
output [4:0] io_fresp_bits_fflags_bits_uop_stq_idx, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_fflags_bits_uop_rxq_idx, // @[execution-unit.scala:104:14]
output [6:0] io_fresp_bits_fflags_bits_uop_pdst, // @[execution-unit.scala:104:14]
output [6:0] io_fresp_bits_fflags_bits_uop_prs1, // @[execution-unit.scala:104:14]
output [6:0] io_fresp_bits_fflags_bits_uop_prs2, // @[execution-unit.scala:104:14]
output [6:0] io_fresp_bits_fflags_bits_uop_prs3, // @[execution-unit.scala:104:14]
output [4:0] io_fresp_bits_fflags_bits_uop_ppred, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_prs1_busy, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_prs2_busy, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_prs3_busy, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_ppred_busy, // @[execution-unit.scala:104:14]
output [6:0] io_fresp_bits_fflags_bits_uop_stale_pdst, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_exception, // @[execution-unit.scala:104:14]
output [63:0] io_fresp_bits_fflags_bits_uop_exc_cause, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_bypassable, // @[execution-unit.scala:104:14]
output [4:0] io_fresp_bits_fflags_bits_uop_mem_cmd, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_fflags_bits_uop_mem_size, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_mem_signed, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_is_fence, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_is_fencei, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_is_amo, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_uses_ldq, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_uses_stq, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_is_sys_pc2epc, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_is_unique, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_flush_on_commit, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_ldst_is_rs1, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_fflags_bits_uop_ldst, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_fflags_bits_uop_lrs1, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_fflags_bits_uop_lrs2, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_fflags_bits_uop_lrs3, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_ldst_val, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_fflags_bits_uop_dst_rtype, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_fflags_bits_uop_lrs1_rtype, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_fflags_bits_uop_lrs2_rtype, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_frs3_en, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_fp_val, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_fp_single, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_xcpt_pf_if, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_xcpt_ae_if, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_xcpt_ma_if, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_bp_debug_if, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_bp_xcpt_if, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_fflags_bits_uop_debug_fsrc, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_fflags_bits_uop_debug_tsrc, // @[execution-unit.scala:104:14]
output [4:0] io_fresp_bits_fflags_bits_flags, // @[execution-unit.scala:104:14]
input io_ll_iresp_ready, // @[execution-unit.scala:104:14]
output io_ll_iresp_valid, // @[execution-unit.scala:104:14]
output [6:0] io_ll_iresp_bits_uop_uopc, // @[execution-unit.scala:104:14]
output [31:0] io_ll_iresp_bits_uop_inst, // @[execution-unit.scala:104:14]
output [31:0] io_ll_iresp_bits_uop_debug_inst, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_is_rvc, // @[execution-unit.scala:104:14]
output [39:0] io_ll_iresp_bits_uop_debug_pc, // @[execution-unit.scala:104:14]
output [2:0] io_ll_iresp_bits_uop_iq_type, // @[execution-unit.scala:104:14]
output [9:0] io_ll_iresp_bits_uop_fu_code, // @[execution-unit.scala:104:14]
output [3:0] io_ll_iresp_bits_uop_ctrl_br_type, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_uop_ctrl_op1_sel, // @[execution-unit.scala:104:14]
output [2:0] io_ll_iresp_bits_uop_ctrl_op2_sel, // @[execution-unit.scala:104:14]
output [2:0] io_ll_iresp_bits_uop_ctrl_imm_sel, // @[execution-unit.scala:104:14]
output [4:0] io_ll_iresp_bits_uop_ctrl_op_fcn, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_ctrl_fcn_dw, // @[execution-unit.scala:104:14]
output [2:0] io_ll_iresp_bits_uop_ctrl_csr_cmd, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_ctrl_is_load, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_ctrl_is_sta, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_ctrl_is_std, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_uop_iw_state, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_iw_p1_poisoned, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_iw_p2_poisoned, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_is_br, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_is_jalr, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_is_jal, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_is_sfb, // @[execution-unit.scala:104:14]
output [15:0] io_ll_iresp_bits_uop_br_mask, // @[execution-unit.scala:104:14]
output [3:0] io_ll_iresp_bits_uop_br_tag, // @[execution-unit.scala:104:14]
output [4:0] io_ll_iresp_bits_uop_ftq_idx, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_edge_inst, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_uop_pc_lob, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_taken, // @[execution-unit.scala:104:14]
output [19:0] io_ll_iresp_bits_uop_imm_packed, // @[execution-unit.scala:104:14]
output [11:0] io_ll_iresp_bits_uop_csr_addr, // @[execution-unit.scala:104:14]
output [6:0] io_ll_iresp_bits_uop_rob_idx, // @[execution-unit.scala:104:14]
output [4:0] io_ll_iresp_bits_uop_ldq_idx, // @[execution-unit.scala:104:14]
output [4:0] io_ll_iresp_bits_uop_stq_idx, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_uop_rxq_idx, // @[execution-unit.scala:104:14]
output [6:0] io_ll_iresp_bits_uop_pdst, // @[execution-unit.scala:104:14]
output [6:0] io_ll_iresp_bits_uop_prs1, // @[execution-unit.scala:104:14]
output [6:0] io_ll_iresp_bits_uop_prs2, // @[execution-unit.scala:104:14]
output [6:0] io_ll_iresp_bits_uop_prs3, // @[execution-unit.scala:104:14]
output [4:0] io_ll_iresp_bits_uop_ppred, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_prs1_busy, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_prs2_busy, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_prs3_busy, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_ppred_busy, // @[execution-unit.scala:104:14]
output [6:0] io_ll_iresp_bits_uop_stale_pdst, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_exception, // @[execution-unit.scala:104:14]
output [63:0] io_ll_iresp_bits_uop_exc_cause, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_bypassable, // @[execution-unit.scala:104:14]
output [4:0] io_ll_iresp_bits_uop_mem_cmd, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_uop_mem_size, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_mem_signed, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_is_fence, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_is_fencei, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_is_amo, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_uses_ldq, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_uses_stq, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_is_sys_pc2epc, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_is_unique, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_flush_on_commit, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_ldst_is_rs1, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_uop_ldst, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_uop_lrs1, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_uop_lrs2, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_uop_lrs3, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_ldst_val, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_uop_dst_rtype, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_uop_lrs1_rtype, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_uop_lrs2_rtype, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_frs3_en, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_fp_val, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_fp_single, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_xcpt_pf_if, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_xcpt_ae_if, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_xcpt_ma_if, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_bp_debug_if, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_bp_xcpt_if, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_uop_debug_fsrc, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_uop_debug_tsrc, // @[execution-unit.scala:104:14]
output [64:0] io_ll_iresp_bits_data, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_predicated, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_valid, // @[execution-unit.scala:104:14]
output [6:0] io_ll_iresp_bits_fflags_bits_uop_uopc, // @[execution-unit.scala:104:14]
output [31:0] io_ll_iresp_bits_fflags_bits_uop_inst, // @[execution-unit.scala:104:14]
output [31:0] io_ll_iresp_bits_fflags_bits_uop_debug_inst, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_is_rvc, // @[execution-unit.scala:104:14]
output [39:0] io_ll_iresp_bits_fflags_bits_uop_debug_pc, // @[execution-unit.scala:104:14]
output [2:0] io_ll_iresp_bits_fflags_bits_uop_iq_type, // @[execution-unit.scala:104:14]
output [9:0] io_ll_iresp_bits_fflags_bits_uop_fu_code, // @[execution-unit.scala:104:14]
output [3:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_br_type, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_op1_sel, // @[execution-unit.scala:104:14]
output [2:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_op2_sel, // @[execution-unit.scala:104:14]
output [2:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_imm_sel, // @[execution-unit.scala:104:14]
output [4:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_op_fcn, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_ctrl_fcn_dw, // @[execution-unit.scala:104:14]
output [2:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_csr_cmd, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_ctrl_is_load, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_ctrl_is_sta, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_ctrl_is_std, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_fflags_bits_uop_iw_state, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_iw_p1_poisoned, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_iw_p2_poisoned, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_is_br, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_is_jalr, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_is_jal, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_is_sfb, // @[execution-unit.scala:104:14]
output [15:0] io_ll_iresp_bits_fflags_bits_uop_br_mask, // @[execution-unit.scala:104:14]
output [3:0] io_ll_iresp_bits_fflags_bits_uop_br_tag, // @[execution-unit.scala:104:14]
output [4:0] io_ll_iresp_bits_fflags_bits_uop_ftq_idx, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_edge_inst, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_fflags_bits_uop_pc_lob, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_taken, // @[execution-unit.scala:104:14]
output [19:0] io_ll_iresp_bits_fflags_bits_uop_imm_packed, // @[execution-unit.scala:104:14]
output [11:0] io_ll_iresp_bits_fflags_bits_uop_csr_addr, // @[execution-unit.scala:104:14]
output [6:0] io_ll_iresp_bits_fflags_bits_uop_rob_idx, // @[execution-unit.scala:104:14]
output [4:0] io_ll_iresp_bits_fflags_bits_uop_ldq_idx, // @[execution-unit.scala:104:14]
output [4:0] io_ll_iresp_bits_fflags_bits_uop_stq_idx, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_fflags_bits_uop_rxq_idx, // @[execution-unit.scala:104:14]
output [6:0] io_ll_iresp_bits_fflags_bits_uop_pdst, // @[execution-unit.scala:104:14]
output [6:0] io_ll_iresp_bits_fflags_bits_uop_prs1, // @[execution-unit.scala:104:14]
output [6:0] io_ll_iresp_bits_fflags_bits_uop_prs2, // @[execution-unit.scala:104:14]
output [6:0] io_ll_iresp_bits_fflags_bits_uop_prs3, // @[execution-unit.scala:104:14]
output [4:0] io_ll_iresp_bits_fflags_bits_uop_ppred, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_prs1_busy, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_prs2_busy, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_prs3_busy, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_ppred_busy, // @[execution-unit.scala:104:14]
output [6:0] io_ll_iresp_bits_fflags_bits_uop_stale_pdst, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_exception, // @[execution-unit.scala:104:14]
output [63:0] io_ll_iresp_bits_fflags_bits_uop_exc_cause, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_bypassable, // @[execution-unit.scala:104:14]
output [4:0] io_ll_iresp_bits_fflags_bits_uop_mem_cmd, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_fflags_bits_uop_mem_size, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_mem_signed, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_is_fence, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_is_fencei, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_is_amo, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_uses_ldq, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_uses_stq, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_is_sys_pc2epc, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_is_unique, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_flush_on_commit, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_ldst_is_rs1, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_fflags_bits_uop_ldst, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_fflags_bits_uop_lrs1, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_fflags_bits_uop_lrs2, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_fflags_bits_uop_lrs3, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_ldst_val, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_fflags_bits_uop_dst_rtype, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_fflags_bits_uop_lrs1_rtype, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_fflags_bits_uop_lrs2_rtype, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_frs3_en, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_fp_val, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_fp_single, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_xcpt_pf_if, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_xcpt_ae_if, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_xcpt_ma_if, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_bp_debug_if, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_bp_xcpt_if, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_fflags_bits_uop_debug_fsrc, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_fflags_bits_uop_debug_tsrc, // @[execution-unit.scala:104:14]
output [4:0] io_ll_iresp_bits_fflags_bits_flags, // @[execution-unit.scala:104:14]
input [15:0] io_brupdate_b1_resolve_mask, // @[execution-unit.scala:104:14]
input [15:0] io_brupdate_b1_mispredict_mask, // @[execution-unit.scala:104:14]
input [6:0] io_brupdate_b2_uop_uopc, // @[execution-unit.scala:104:14]
input [31:0] io_brupdate_b2_uop_inst, // @[execution-unit.scala:104:14]
input [31:0] io_brupdate_b2_uop_debug_inst, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_is_rvc, // @[execution-unit.scala:104:14]
input [39:0] io_brupdate_b2_uop_debug_pc, // @[execution-unit.scala:104:14]
input [2:0] io_brupdate_b2_uop_iq_type, // @[execution-unit.scala:104:14]
input [9:0] io_brupdate_b2_uop_fu_code, // @[execution-unit.scala:104:14]
input [3:0] io_brupdate_b2_uop_ctrl_br_type, // @[execution-unit.scala:104:14]
input [1:0] io_brupdate_b2_uop_ctrl_op1_sel, // @[execution-unit.scala:104:14]
input [2:0] io_brupdate_b2_uop_ctrl_op2_sel, // @[execution-unit.scala:104:14]
input [2:0] io_brupdate_b2_uop_ctrl_imm_sel, // @[execution-unit.scala:104:14]
input [4:0] io_brupdate_b2_uop_ctrl_op_fcn, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_ctrl_fcn_dw, // @[execution-unit.scala:104:14]
input [2:0] io_brupdate_b2_uop_ctrl_csr_cmd, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_ctrl_is_load, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_ctrl_is_sta, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_ctrl_is_std, // @[execution-unit.scala:104:14]
input [1:0] io_brupdate_b2_uop_iw_state, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_iw_p1_poisoned, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_iw_p2_poisoned, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_is_br, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_is_jalr, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_is_jal, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_is_sfb, // @[execution-unit.scala:104:14]
input [15:0] io_brupdate_b2_uop_br_mask, // @[execution-unit.scala:104:14]
input [3:0] io_brupdate_b2_uop_br_tag, // @[execution-unit.scala:104:14]
input [4:0] io_brupdate_b2_uop_ftq_idx, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_edge_inst, // @[execution-unit.scala:104:14]
input [5:0] io_brupdate_b2_uop_pc_lob, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_taken, // @[execution-unit.scala:104:14]
input [19:0] io_brupdate_b2_uop_imm_packed, // @[execution-unit.scala:104:14]
input [11:0] io_brupdate_b2_uop_csr_addr, // @[execution-unit.scala:104:14]
input [6:0] io_brupdate_b2_uop_rob_idx, // @[execution-unit.scala:104:14]
input [4:0] io_brupdate_b2_uop_ldq_idx, // @[execution-unit.scala:104:14]
input [4:0] io_brupdate_b2_uop_stq_idx, // @[execution-unit.scala:104:14]
input [1:0] io_brupdate_b2_uop_rxq_idx, // @[execution-unit.scala:104:14]
input [6:0] io_brupdate_b2_uop_pdst, // @[execution-unit.scala:104:14]
input [6:0] io_brupdate_b2_uop_prs1, // @[execution-unit.scala:104:14]
input [6:0] io_brupdate_b2_uop_prs2, // @[execution-unit.scala:104:14]
input [6:0] io_brupdate_b2_uop_prs3, // @[execution-unit.scala:104:14]
input [4:0] io_brupdate_b2_uop_ppred, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_prs1_busy, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_prs2_busy, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_prs3_busy, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_ppred_busy, // @[execution-unit.scala:104:14]
input [6:0] io_brupdate_b2_uop_stale_pdst, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_exception, // @[execution-unit.scala:104:14]
input [63:0] io_brupdate_b2_uop_exc_cause, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_bypassable, // @[execution-unit.scala:104:14]
input [4:0] io_brupdate_b2_uop_mem_cmd, // @[execution-unit.scala:104:14]
input [1:0] io_brupdate_b2_uop_mem_size, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_mem_signed, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_is_fence, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_is_fencei, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_is_amo, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_uses_ldq, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_uses_stq, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_is_sys_pc2epc, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_is_unique, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_flush_on_commit, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_ldst_is_rs1, // @[execution-unit.scala:104:14]
input [5:0] io_brupdate_b2_uop_ldst, // @[execution-unit.scala:104:14]
input [5:0] io_brupdate_b2_uop_lrs1, // @[execution-unit.scala:104:14]
input [5:0] io_brupdate_b2_uop_lrs2, // @[execution-unit.scala:104:14]
input [5:0] io_brupdate_b2_uop_lrs3, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_ldst_val, // @[execution-unit.scala:104:14]
input [1:0] io_brupdate_b2_uop_dst_rtype, // @[execution-unit.scala:104:14]
input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[execution-unit.scala:104:14]
input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_frs3_en, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_fp_val, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_fp_single, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_xcpt_pf_if, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_xcpt_ae_if, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_xcpt_ma_if, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_bp_debug_if, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_bp_xcpt_if, // @[execution-unit.scala:104:14]
input [1:0] io_brupdate_b2_uop_debug_fsrc, // @[execution-unit.scala:104:14]
input [1:0] io_brupdate_b2_uop_debug_tsrc, // @[execution-unit.scala:104:14]
input io_brupdate_b2_valid, // @[execution-unit.scala:104:14]
input io_brupdate_b2_mispredict, // @[execution-unit.scala:104:14]
input io_brupdate_b2_taken, // @[execution-unit.scala:104:14]
input [2:0] io_brupdate_b2_cfi_type, // @[execution-unit.scala:104:14]
input [1:0] io_brupdate_b2_pc_sel, // @[execution-unit.scala:104:14]
input [39:0] io_brupdate_b2_jalr_target, // @[execution-unit.scala:104:14]
input [20:0] io_brupdate_b2_target_offset, // @[execution-unit.scala:104:14]
input io_status_debug, // @[execution-unit.scala:104:14]
input io_status_cease, // @[execution-unit.scala:104:14]
input io_status_wfi, // @[execution-unit.scala:104:14]
input [1:0] io_status_dprv, // @[execution-unit.scala:104:14]
input io_status_dv, // @[execution-unit.scala:104:14]
input [1:0] io_status_prv, // @[execution-unit.scala:104:14]
input io_status_v, // @[execution-unit.scala:104:14]
input io_status_sd, // @[execution-unit.scala:104:14]
input io_status_mpv, // @[execution-unit.scala:104:14]
input io_status_gva, // @[execution-unit.scala:104:14]
input io_status_tsr, // @[execution-unit.scala:104:14]
input io_status_tw, // @[execution-unit.scala:104:14]
input io_status_tvm, // @[execution-unit.scala:104:14]
input io_status_mxr, // @[execution-unit.scala:104:14]
input io_status_sum, // @[execution-unit.scala:104:14]
input io_status_mprv, // @[execution-unit.scala:104:14]
input [1:0] io_status_fs, // @[execution-unit.scala:104:14]
input [1:0] io_status_mpp, // @[execution-unit.scala:104:14]
input io_status_spp, // @[execution-unit.scala:104:14]
input io_status_mpie, // @[execution-unit.scala:104:14]
input io_status_spie, // @[execution-unit.scala:104:14]
input io_status_mie, // @[execution-unit.scala:104:14]
input io_status_sie, // @[execution-unit.scala:104:14]
input [2:0] io_fcsr_rm // @[execution-unit.scala:104:14]
);
wire _resp_arb_io_in_0_ready; // @[execution-unit.scala:563:26]
wire _resp_arb_io_in_1_ready; // @[execution-unit.scala:563:26]
wire _fp_sdq_io_enq_ready; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_valid; // @[execution-unit.scala:551:24]
wire [6:0] _fp_sdq_io_deq_bits_uop_uopc; // @[execution-unit.scala:551:24]
wire [31:0] _fp_sdq_io_deq_bits_uop_inst; // @[execution-unit.scala:551:24]
wire [31:0] _fp_sdq_io_deq_bits_uop_debug_inst; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_is_rvc; // @[execution-unit.scala:551:24]
wire [39:0] _fp_sdq_io_deq_bits_uop_debug_pc; // @[execution-unit.scala:551:24]
wire [2:0] _fp_sdq_io_deq_bits_uop_iq_type; // @[execution-unit.scala:551:24]
wire [9:0] _fp_sdq_io_deq_bits_uop_fu_code; // @[execution-unit.scala:551:24]
wire [3:0] _fp_sdq_io_deq_bits_uop_ctrl_br_type; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:551:24]
wire [2:0] _fp_sdq_io_deq_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:551:24]
wire [2:0] _fp_sdq_io_deq_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:551:24]
wire [4:0] _fp_sdq_io_deq_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:551:24]
wire [2:0] _fp_sdq_io_deq_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_ctrl_is_load; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_ctrl_is_sta; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_ctrl_is_std; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_uop_iw_state; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_is_br; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_is_jalr; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_is_jal; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_is_sfb; // @[execution-unit.scala:551:24]
wire [15:0] _fp_sdq_io_deq_bits_uop_br_mask; // @[execution-unit.scala:551:24]
wire [3:0] _fp_sdq_io_deq_bits_uop_br_tag; // @[execution-unit.scala:551:24]
wire [4:0] _fp_sdq_io_deq_bits_uop_ftq_idx; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_edge_inst; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_uop_pc_lob; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_taken; // @[execution-unit.scala:551:24]
wire [19:0] _fp_sdq_io_deq_bits_uop_imm_packed; // @[execution-unit.scala:551:24]
wire [11:0] _fp_sdq_io_deq_bits_uop_csr_addr; // @[execution-unit.scala:551:24]
wire [6:0] _fp_sdq_io_deq_bits_uop_rob_idx; // @[execution-unit.scala:551:24]
wire [4:0] _fp_sdq_io_deq_bits_uop_ldq_idx; // @[execution-unit.scala:551:24]
wire [4:0] _fp_sdq_io_deq_bits_uop_stq_idx; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_uop_rxq_idx; // @[execution-unit.scala:551:24]
wire [6:0] _fp_sdq_io_deq_bits_uop_pdst; // @[execution-unit.scala:551:24]
wire [6:0] _fp_sdq_io_deq_bits_uop_prs1; // @[execution-unit.scala:551:24]
wire [6:0] _fp_sdq_io_deq_bits_uop_prs2; // @[execution-unit.scala:551:24]
wire [6:0] _fp_sdq_io_deq_bits_uop_prs3; // @[execution-unit.scala:551:24]
wire [4:0] _fp_sdq_io_deq_bits_uop_ppred; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_prs1_busy; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_prs2_busy; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_prs3_busy; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_ppred_busy; // @[execution-unit.scala:551:24]
wire [6:0] _fp_sdq_io_deq_bits_uop_stale_pdst; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_exception; // @[execution-unit.scala:551:24]
wire [63:0] _fp_sdq_io_deq_bits_uop_exc_cause; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_bypassable; // @[execution-unit.scala:551:24]
wire [4:0] _fp_sdq_io_deq_bits_uop_mem_cmd; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_uop_mem_size; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_mem_signed; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_is_fence; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_is_fencei; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_is_amo; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_uses_ldq; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_uses_stq; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_is_unique; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_flush_on_commit; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_ldst_is_rs1; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_uop_ldst; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_uop_lrs1; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_uop_lrs2; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_uop_lrs3; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_ldst_val; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_uop_dst_rtype; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_uop_lrs1_rtype; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_uop_lrs2_rtype; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_frs3_en; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_fp_val; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_fp_single; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_xcpt_pf_if; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_xcpt_ae_if; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_xcpt_ma_if; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_bp_debug_if; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_bp_xcpt_if; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_uop_debug_fsrc; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_uop_debug_tsrc; // @[execution-unit.scala:551:24]
wire [64:0] _fp_sdq_io_deq_bits_data; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_predicated; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_valid; // @[execution-unit.scala:551:24]
wire [6:0] _fp_sdq_io_deq_bits_fflags_bits_uop_uopc; // @[execution-unit.scala:551:24]
wire [31:0] _fp_sdq_io_deq_bits_fflags_bits_uop_inst; // @[execution-unit.scala:551:24]
wire [31:0] _fp_sdq_io_deq_bits_fflags_bits_uop_debug_inst; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_is_rvc; // @[execution-unit.scala:551:24]
wire [39:0] _fp_sdq_io_deq_bits_fflags_bits_uop_debug_pc; // @[execution-unit.scala:551:24]
wire [2:0] _fp_sdq_io_deq_bits_fflags_bits_uop_iq_type; // @[execution-unit.scala:551:24]
wire [9:0] _fp_sdq_io_deq_bits_fflags_bits_uop_fu_code; // @[execution-unit.scala:551:24]
wire [3:0] _fp_sdq_io_deq_bits_fflags_bits_uop_ctrl_br_type; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_fflags_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:551:24]
wire [2:0] _fp_sdq_io_deq_bits_fflags_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:551:24]
wire [2:0] _fp_sdq_io_deq_bits_fflags_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:551:24]
wire [4:0] _fp_sdq_io_deq_bits_fflags_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:551:24]
wire [2:0] _fp_sdq_io_deq_bits_fflags_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_ctrl_is_load; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_ctrl_is_sta; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_ctrl_is_std; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_fflags_bits_uop_iw_state; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_is_br; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_is_jalr; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_is_jal; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_is_sfb; // @[execution-unit.scala:551:24]
wire [15:0] _fp_sdq_io_deq_bits_fflags_bits_uop_br_mask; // @[execution-unit.scala:551:24]
wire [3:0] _fp_sdq_io_deq_bits_fflags_bits_uop_br_tag; // @[execution-unit.scala:551:24]
wire [4:0] _fp_sdq_io_deq_bits_fflags_bits_uop_ftq_idx; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_edge_inst; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_fflags_bits_uop_pc_lob; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_taken; // @[execution-unit.scala:551:24]
wire [19:0] _fp_sdq_io_deq_bits_fflags_bits_uop_imm_packed; // @[execution-unit.scala:551:24]
wire [11:0] _fp_sdq_io_deq_bits_fflags_bits_uop_csr_addr; // @[execution-unit.scala:551:24]
wire [6:0] _fp_sdq_io_deq_bits_fflags_bits_uop_rob_idx; // @[execution-unit.scala:551:24]
wire [4:0] _fp_sdq_io_deq_bits_fflags_bits_uop_ldq_idx; // @[execution-unit.scala:551:24]
wire [4:0] _fp_sdq_io_deq_bits_fflags_bits_uop_stq_idx; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_fflags_bits_uop_rxq_idx; // @[execution-unit.scala:551:24]
wire [6:0] _fp_sdq_io_deq_bits_fflags_bits_uop_pdst; // @[execution-unit.scala:551:24]
wire [6:0] _fp_sdq_io_deq_bits_fflags_bits_uop_prs1; // @[execution-unit.scala:551:24]
wire [6:0] _fp_sdq_io_deq_bits_fflags_bits_uop_prs2; // @[execution-unit.scala:551:24]
wire [6:0] _fp_sdq_io_deq_bits_fflags_bits_uop_prs3; // @[execution-unit.scala:551:24]
wire [4:0] _fp_sdq_io_deq_bits_fflags_bits_uop_ppred; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_prs1_busy; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_prs2_busy; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_prs3_busy; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_ppred_busy; // @[execution-unit.scala:551:24]
wire [6:0] _fp_sdq_io_deq_bits_fflags_bits_uop_stale_pdst; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_exception; // @[execution-unit.scala:551:24]
wire [63:0] _fp_sdq_io_deq_bits_fflags_bits_uop_exc_cause; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_bypassable; // @[execution-unit.scala:551:24]
wire [4:0] _fp_sdq_io_deq_bits_fflags_bits_uop_mem_cmd; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_fflags_bits_uop_mem_size; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_mem_signed; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_is_fence; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_is_fencei; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_is_amo; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_uses_ldq; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_uses_stq; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_is_unique; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_flush_on_commit; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_ldst_is_rs1; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_fflags_bits_uop_ldst; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_fflags_bits_uop_lrs1; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_fflags_bits_uop_lrs2; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_fflags_bits_uop_lrs3; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_ldst_val; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_fflags_bits_uop_dst_rtype; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_fflags_bits_uop_lrs1_rtype; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_fflags_bits_uop_lrs2_rtype; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_frs3_en; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_fp_val; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_fp_single; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_xcpt_pf_if; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_xcpt_ae_if; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_xcpt_ma_if; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_bp_debug_if; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_bp_xcpt_if; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_fflags_bits_uop_debug_fsrc; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_fflags_bits_uop_debug_tsrc; // @[execution-unit.scala:551:24]
wire [4:0] _fp_sdq_io_deq_bits_fflags_bits_flags; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_empty; // @[execution-unit.scala:551:24]
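  // Outputs of the `queue` submodule, instantiated at execution-unit.scala:537.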
wire _queue_io_enq_ready; // @[execution-unit.scala:537:23]
wire _queue_io_deq_valid; // @[execution-unit.scala:537:23]
wire [6:0] _queue_io_deq_bits_uop_uopc; // @[execution-unit.scala:537:23]
wire [31:0] _queue_io_deq_bits_uop_inst; // @[execution-unit.scala:537:23]
wire [31:0] _queue_io_deq_bits_uop_debug_inst; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_is_rvc; // @[execution-unit.scala:537:23]
wire [39:0] _queue_io_deq_bits_uop_debug_pc; // @[execution-unit.scala:537:23]
wire [2:0] _queue_io_deq_bits_uop_iq_type; // @[execution-unit.scala:537:23]
wire [9:0] _queue_io_deq_bits_uop_fu_code; // @[execution-unit.scala:537:23]
wire [3:0] _queue_io_deq_bits_uop_ctrl_br_type; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:537:23]
wire [2:0] _queue_io_deq_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:537:23]
wire [2:0] _queue_io_deq_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:537:23]
wire [4:0] _queue_io_deq_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:537:23]
wire [2:0] _queue_io_deq_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_ctrl_is_load; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_ctrl_is_sta; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_ctrl_is_std; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_uop_iw_state; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_is_br; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_is_jalr; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_is_jal; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_is_sfb; // @[execution-unit.scala:537:23]
wire [15:0] _queue_io_deq_bits_uop_br_mask; // @[execution-unit.scala:537:23]
wire [3:0] _queue_io_deq_bits_uop_br_tag; // @[execution-unit.scala:537:23]
wire [4:0] _queue_io_deq_bits_uop_ftq_idx; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_edge_inst; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_uop_pc_lob; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_taken; // @[execution-unit.scala:537:23]
wire [19:0] _queue_io_deq_bits_uop_imm_packed; // @[execution-unit.scala:537:23]
wire [11:0] _queue_io_deq_bits_uop_csr_addr; // @[execution-unit.scala:537:23]
wire [6:0] _queue_io_deq_bits_uop_rob_idx; // @[execution-unit.scala:537:23]
wire [4:0] _queue_io_deq_bits_uop_ldq_idx; // @[execution-unit.scala:537:23]
wire [4:0] _queue_io_deq_bits_uop_stq_idx; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_uop_rxq_idx; // @[execution-unit.scala:537:23]
wire [6:0] _queue_io_deq_bits_uop_pdst; // @[execution-unit.scala:537:23]
wire [6:0] _queue_io_deq_bits_uop_prs1; // @[execution-unit.scala:537:23]
wire [6:0] _queue_io_deq_bits_uop_prs2; // @[execution-unit.scala:537:23]
wire [6:0] _queue_io_deq_bits_uop_prs3; // @[execution-unit.scala:537:23]
wire [4:0] _queue_io_deq_bits_uop_ppred; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_prs1_busy; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_prs2_busy; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_prs3_busy; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_ppred_busy; // @[execution-unit.scala:537:23]
wire [6:0] _queue_io_deq_bits_uop_stale_pdst; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_exception; // @[execution-unit.scala:537:23]
wire [63:0] _queue_io_deq_bits_uop_exc_cause; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_bypassable; // @[execution-unit.scala:537:23]
wire [4:0] _queue_io_deq_bits_uop_mem_cmd; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_uop_mem_size; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_mem_signed; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_is_fence; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_is_fencei; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_is_amo; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_uses_ldq; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_uses_stq; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_is_unique; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_flush_on_commit; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_ldst_is_rs1; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_uop_ldst; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_uop_lrs1; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_uop_lrs2; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_uop_lrs3; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_ldst_val; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_uop_dst_rtype; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_uop_lrs1_rtype; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_uop_lrs2_rtype; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_frs3_en; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_fp_val; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_fp_single; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_xcpt_pf_if; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_xcpt_ae_if; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_xcpt_ma_if; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_bp_debug_if; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_bp_xcpt_if; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_uop_debug_fsrc; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_uop_debug_tsrc; // @[execution-unit.scala:537:23]
wire [64:0] _queue_io_deq_bits_data; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_predicated; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_valid; // @[execution-unit.scala:537:23]
wire [6:0] _queue_io_deq_bits_fflags_bits_uop_uopc; // @[execution-unit.scala:537:23]
wire [31:0] _queue_io_deq_bits_fflags_bits_uop_inst; // @[execution-unit.scala:537:23]
wire [31:0] _queue_io_deq_bits_fflags_bits_uop_debug_inst; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_is_rvc; // @[execution-unit.scala:537:23]
wire [39:0] _queue_io_deq_bits_fflags_bits_uop_debug_pc; // @[execution-unit.scala:537:23]
wire [2:0] _queue_io_deq_bits_fflags_bits_uop_iq_type; // @[execution-unit.scala:537:23]
wire [9:0] _queue_io_deq_bits_fflags_bits_uop_fu_code; // @[execution-unit.scala:537:23]
wire [3:0] _queue_io_deq_bits_fflags_bits_uop_ctrl_br_type; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_fflags_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:537:23]
wire [2:0] _queue_io_deq_bits_fflags_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:537:23]
wire [2:0] _queue_io_deq_bits_fflags_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:537:23]
wire [4:0] _queue_io_deq_bits_fflags_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:537:23]
wire [2:0] _queue_io_deq_bits_fflags_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_ctrl_is_load; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_ctrl_is_sta; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_ctrl_is_std; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_fflags_bits_uop_iw_state; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_is_br; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_is_jalr; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_is_jal; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_is_sfb; // @[execution-unit.scala:537:23]
wire [15:0] _queue_io_deq_bits_fflags_bits_uop_br_mask; // @[execution-unit.scala:537:23]
wire [3:0] _queue_io_deq_bits_fflags_bits_uop_br_tag; // @[execution-unit.scala:537:23]
wire [4:0] _queue_io_deq_bits_fflags_bits_uop_ftq_idx; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_edge_inst; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_fflags_bits_uop_pc_lob; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_taken; // @[execution-unit.scala:537:23]
wire [19:0] _queue_io_deq_bits_fflags_bits_uop_imm_packed; // @[execution-unit.scala:537:23]
wire [11:0] _queue_io_deq_bits_fflags_bits_uop_csr_addr; // @[execution-unit.scala:537:23]
wire [6:0] _queue_io_deq_bits_fflags_bits_uop_rob_idx; // @[execution-unit.scala:537:23]
wire [4:0] _queue_io_deq_bits_fflags_bits_uop_ldq_idx; // @[execution-unit.scala:537:23]
wire [4:0] _queue_io_deq_bits_fflags_bits_uop_stq_idx; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_fflags_bits_uop_rxq_idx; // @[execution-unit.scala:537:23]
wire [6:0] _queue_io_deq_bits_fflags_bits_uop_pdst; // @[execution-unit.scala:537:23]
wire [6:0] _queue_io_deq_bits_fflags_bits_uop_prs1; // @[execution-unit.scala:537:23]
wire [6:0] _queue_io_deq_bits_fflags_bits_uop_prs2; // @[execution-unit.scala:537:23]
wire [6:0] _queue_io_deq_bits_fflags_bits_uop_prs3; // @[execution-unit.scala:537:23]
wire [4:0] _queue_io_deq_bits_fflags_bits_uop_ppred; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_prs1_busy; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_prs2_busy; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_prs3_busy; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_ppred_busy; // @[execution-unit.scala:537:23]
wire [6:0] _queue_io_deq_bits_fflags_bits_uop_stale_pdst; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_exception; // @[execution-unit.scala:537:23]
wire [63:0] _queue_io_deq_bits_fflags_bits_uop_exc_cause; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_bypassable; // @[execution-unit.scala:537:23]
wire [4:0] _queue_io_deq_bits_fflags_bits_uop_mem_cmd; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_fflags_bits_uop_mem_size; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_mem_signed; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_is_fence; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_is_fencei; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_is_amo; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_uses_ldq; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_uses_stq; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_is_unique; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_flush_on_commit; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_ldst_is_rs1; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_fflags_bits_uop_ldst; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_fflags_bits_uop_lrs1; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_fflags_bits_uop_lrs2; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_fflags_bits_uop_lrs3; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_ldst_val; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_fflags_bits_uop_dst_rtype; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_fflags_bits_uop_lrs1_rtype; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_fflags_bits_uop_lrs2_rtype; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_frs3_en; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_fp_val; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_fp_single; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_xcpt_pf_if; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_xcpt_ae_if; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_xcpt_ma_if; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_bp_debug_if; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_bp_xcpt_if; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_fflags_bits_uop_debug_fsrc; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_fflags_bits_uop_debug_tsrc; // @[execution-unit.scala:537:23]
wire [4:0] _queue_io_deq_bits_fflags_bits_flags; // @[execution-unit.scala:537:23]
wire _queue_io_empty; // @[execution-unit.scala:537:23]
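  // Outputs of the FDivSqrtUnit (FP divide/square-root functional unit), instantiated at execution-unit.scala:502.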
wire _FDivSqrtUnit_io_req_ready; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_valid; // @[execution-unit.scala:502:22]
wire [6:0] _FDivSqrtUnit_io_resp_bits_uop_uopc; // @[execution-unit.scala:502:22]
wire [31:0] _FDivSqrtUnit_io_resp_bits_uop_inst; // @[execution-unit.scala:502:22]
wire [31:0] _FDivSqrtUnit_io_resp_bits_uop_debug_inst; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_is_rvc; // @[execution-unit.scala:502:22]
wire [39:0] _FDivSqrtUnit_io_resp_bits_uop_debug_pc; // @[execution-unit.scala:502:22]
wire [2:0] _FDivSqrtUnit_io_resp_bits_uop_iq_type; // @[execution-unit.scala:502:22]
wire [9:0] _FDivSqrtUnit_io_resp_bits_uop_fu_code; // @[execution-unit.scala:502:22]
wire [3:0] _FDivSqrtUnit_io_resp_bits_uop_ctrl_br_type; // @[execution-unit.scala:502:22]
wire [1:0] _FDivSqrtUnit_io_resp_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:502:22]
wire [2:0] _FDivSqrtUnit_io_resp_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:502:22]
wire [2:0] _FDivSqrtUnit_io_resp_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:502:22]
wire [4:0] _FDivSqrtUnit_io_resp_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:502:22]
wire [2:0] _FDivSqrtUnit_io_resp_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_ctrl_is_load; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_ctrl_is_sta; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_ctrl_is_std; // @[execution-unit.scala:502:22]
wire [1:0] _FDivSqrtUnit_io_resp_bits_uop_iw_state; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_is_br; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_is_jalr; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_is_jal; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_is_sfb; // @[execution-unit.scala:502:22]
wire [15:0] _FDivSqrtUnit_io_resp_bits_uop_br_mask; // @[execution-unit.scala:502:22]
wire [3:0] _FDivSqrtUnit_io_resp_bits_uop_br_tag; // @[execution-unit.scala:502:22]
wire [4:0] _FDivSqrtUnit_io_resp_bits_uop_ftq_idx; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_edge_inst; // @[execution-unit.scala:502:22]
wire [5:0] _FDivSqrtUnit_io_resp_bits_uop_pc_lob; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_taken; // @[execution-unit.scala:502:22]
wire [19:0] _FDivSqrtUnit_io_resp_bits_uop_imm_packed; // @[execution-unit.scala:502:22]
wire [11:0] _FDivSqrtUnit_io_resp_bits_uop_csr_addr; // @[execution-unit.scala:502:22]
wire [6:0] _FDivSqrtUnit_io_resp_bits_uop_rob_idx; // @[execution-unit.scala:502:22]
wire [4:0] _FDivSqrtUnit_io_resp_bits_uop_ldq_idx; // @[execution-unit.scala:502:22]
wire [4:0] _FDivSqrtUnit_io_resp_bits_uop_stq_idx; // @[execution-unit.scala:502:22]
wire [1:0] _FDivSqrtUnit_io_resp_bits_uop_rxq_idx; // @[execution-unit.scala:502:22]
wire [6:0] _FDivSqrtUnit_io_resp_bits_uop_pdst; // @[execution-unit.scala:502:22]
wire [6:0] _FDivSqrtUnit_io_resp_bits_uop_prs1; // @[execution-unit.scala:502:22]
wire [6:0] _FDivSqrtUnit_io_resp_bits_uop_prs2; // @[execution-unit.scala:502:22]
wire [6:0] _FDivSqrtUnit_io_resp_bits_uop_prs3; // @[execution-unit.scala:502:22]
wire [4:0] _FDivSqrtUnit_io_resp_bits_uop_ppred; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_prs1_busy; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_prs2_busy; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_prs3_busy; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_ppred_busy; // @[execution-unit.scala:502:22]
wire [6:0] _FDivSqrtUnit_io_resp_bits_uop_stale_pdst; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_exception; // @[execution-unit.scala:502:22]
wire [63:0] _FDivSqrtUnit_io_resp_bits_uop_exc_cause; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_bypassable; // @[execution-unit.scala:502:22]
wire [4:0] _FDivSqrtUnit_io_resp_bits_uop_mem_cmd; // @[execution-unit.scala:502:22]
wire [1:0] _FDivSqrtUnit_io_resp_bits_uop_mem_size; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_mem_signed; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_is_fence; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_is_fencei; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_is_amo; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_uses_ldq; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_uses_stq; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_is_unique; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_flush_on_commit; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_ldst_is_rs1; // @[execution-unit.scala:502:22]
wire [5:0] _FDivSqrtUnit_io_resp_bits_uop_ldst; // @[execution-unit.scala:502:22]
wire [5:0] _FDivSqrtUnit_io_resp_bits_uop_lrs1; // @[execution-unit.scala:502:22]
wire [5:0] _FDivSqrtUnit_io_resp_bits_uop_lrs2; // @[execution-unit.scala:502:22]
wire [5:0] _FDivSqrtUnit_io_resp_bits_uop_lrs3; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_ldst_val; // @[execution-unit.scala:502:22]
wire [1:0] _FDivSqrtUnit_io_resp_bits_uop_dst_rtype; // @[execution-unit.scala:502:22]
wire [1:0] _FDivSqrtUnit_io_resp_bits_uop_lrs1_rtype; // @[execution-unit.scala:502:22]
wire [1:0] _FDivSqrtUnit_io_resp_bits_uop_lrs2_rtype; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_frs3_en; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_fp_val; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_fp_single; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_xcpt_pf_if; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_xcpt_ae_if; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_xcpt_ma_if; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_bp_debug_if; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_bp_xcpt_if; // @[execution-unit.scala:502:22]
wire [1:0] _FDivSqrtUnit_io_resp_bits_uop_debug_fsrc; // @[execution-unit.scala:502:22]
wire [1:0] _FDivSqrtUnit_io_resp_bits_uop_debug_tsrc; // @[execution-unit.scala:502:22]
wire [64:0] _FDivSqrtUnit_io_resp_bits_data; // @[execution-unit.scala:502:22]
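  // Outputs of the FPUUnit functional unit, instantiated at execution-unit.scala:477.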
wire _FPUUnit_io_resp_valid; // @[execution-unit.scala:477:17]
wire [6:0] _FPUUnit_io_resp_bits_uop_uopc; // @[execution-unit.scala:477:17]
wire [31:0] _FPUUnit_io_resp_bits_uop_inst; // @[execution-unit.scala:477:17]
wire [31:0] _FPUUnit_io_resp_bits_uop_debug_inst; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_is_rvc; // @[execution-unit.scala:477:17]
wire [39:0] _FPUUnit_io_resp_bits_uop_debug_pc; // @[execution-unit.scala:477:17]
wire [2:0] _FPUUnit_io_resp_bits_uop_iq_type; // @[execution-unit.scala:477:17]
wire [9:0] _FPUUnit_io_resp_bits_uop_fu_code; // @[execution-unit.scala:477:17]
wire [3:0] _FPUUnit_io_resp_bits_uop_ctrl_br_type; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:477:17]
wire [2:0] _FPUUnit_io_resp_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:477:17]
wire [2:0] _FPUUnit_io_resp_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:477:17]
wire [4:0] _FPUUnit_io_resp_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:477:17]
wire [2:0] _FPUUnit_io_resp_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_ctrl_is_load; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_ctrl_is_sta; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_ctrl_is_std; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_uop_iw_state; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_is_br; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_is_jalr; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_is_jal; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_is_sfb; // @[execution-unit.scala:477:17]
wire [15:0] _FPUUnit_io_resp_bits_uop_br_mask; // @[execution-unit.scala:477:17]
wire [3:0] _FPUUnit_io_resp_bits_uop_br_tag; // @[execution-unit.scala:477:17]
wire [4:0] _FPUUnit_io_resp_bits_uop_ftq_idx; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_edge_inst; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_uop_pc_lob; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_taken; // @[execution-unit.scala:477:17]
wire [19:0] _FPUUnit_io_resp_bits_uop_imm_packed; // @[execution-unit.scala:477:17]
wire [11:0] _FPUUnit_io_resp_bits_uop_csr_addr; // @[execution-unit.scala:477:17]
wire [6:0] _FPUUnit_io_resp_bits_uop_rob_idx; // @[execution-unit.scala:477:17]
wire [4:0] _FPUUnit_io_resp_bits_uop_ldq_idx; // @[execution-unit.scala:477:17]
wire [4:0] _FPUUnit_io_resp_bits_uop_stq_idx; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_uop_rxq_idx; // @[execution-unit.scala:477:17]
wire [6:0] _FPUUnit_io_resp_bits_uop_pdst; // @[execution-unit.scala:477:17]
wire [6:0] _FPUUnit_io_resp_bits_uop_prs1; // @[execution-unit.scala:477:17]
wire [6:0] _FPUUnit_io_resp_bits_uop_prs2; // @[execution-unit.scala:477:17]
wire [6:0] _FPUUnit_io_resp_bits_uop_prs3; // @[execution-unit.scala:477:17]
wire [4:0] _FPUUnit_io_resp_bits_uop_ppred; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_prs1_busy; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_prs2_busy; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_prs3_busy; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_ppred_busy; // @[execution-unit.scala:477:17]
wire [6:0] _FPUUnit_io_resp_bits_uop_stale_pdst; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_exception; // @[execution-unit.scala:477:17]
wire [63:0] _FPUUnit_io_resp_bits_uop_exc_cause; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_bypassable; // @[execution-unit.scala:477:17]
wire [4:0] _FPUUnit_io_resp_bits_uop_mem_cmd; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_uop_mem_size; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_mem_signed; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_is_fence; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_is_fencei; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_is_amo; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_uses_ldq; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_uses_stq; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_is_unique; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_flush_on_commit; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_ldst_is_rs1; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_uop_ldst; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_uop_lrs1; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_uop_lrs2; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_uop_lrs3; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_ldst_val; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_uop_dst_rtype; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_uop_lrs1_rtype; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_uop_lrs2_rtype; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_frs3_en; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_fp_val; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_fp_single; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_xcpt_pf_if; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_xcpt_ae_if; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_xcpt_ma_if; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_bp_debug_if; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_bp_xcpt_if; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_uop_debug_fsrc; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_uop_debug_tsrc; // @[execution-unit.scala:477:17]
wire [64:0] _FPUUnit_io_resp_bits_data; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_valid; // @[execution-unit.scala:477:17]
wire [6:0] _FPUUnit_io_resp_bits_fflags_bits_uop_uopc; // @[execution-unit.scala:477:17]
wire [31:0] _FPUUnit_io_resp_bits_fflags_bits_uop_inst; // @[execution-unit.scala:477:17]
wire [31:0] _FPUUnit_io_resp_bits_fflags_bits_uop_debug_inst; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_is_rvc; // @[execution-unit.scala:477:17]
wire [39:0] _FPUUnit_io_resp_bits_fflags_bits_uop_debug_pc; // @[execution-unit.scala:477:17]
wire [2:0] _FPUUnit_io_resp_bits_fflags_bits_uop_iq_type; // @[execution-unit.scala:477:17]
wire [9:0] _FPUUnit_io_resp_bits_fflags_bits_uop_fu_code; // @[execution-unit.scala:477:17]
wire [3:0] _FPUUnit_io_resp_bits_fflags_bits_uop_ctrl_br_type; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_fflags_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:477:17]
wire [2:0] _FPUUnit_io_resp_bits_fflags_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:477:17]
wire [2:0] _FPUUnit_io_resp_bits_fflags_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:477:17]
wire [4:0] _FPUUnit_io_resp_bits_fflags_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:477:17]
wire [2:0] _FPUUnit_io_resp_bits_fflags_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_ctrl_is_load; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_ctrl_is_sta; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_ctrl_is_std; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_fflags_bits_uop_iw_state; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_is_br; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_is_jalr; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_is_jal; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_is_sfb; // @[execution-unit.scala:477:17]
wire [15:0] _FPUUnit_io_resp_bits_fflags_bits_uop_br_mask; // @[execution-unit.scala:477:17]
wire [3:0] _FPUUnit_io_resp_bits_fflags_bits_uop_br_tag; // @[execution-unit.scala:477:17]
wire [4:0] _FPUUnit_io_resp_bits_fflags_bits_uop_ftq_idx; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_edge_inst; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_fflags_bits_uop_pc_lob; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_taken; // @[execution-unit.scala:477:17]
wire [19:0] _FPUUnit_io_resp_bits_fflags_bits_uop_imm_packed; // @[execution-unit.scala:477:17]
wire [11:0] _FPUUnit_io_resp_bits_fflags_bits_uop_csr_addr; // @[execution-unit.scala:477:17]
wire [6:0] _FPUUnit_io_resp_bits_fflags_bits_uop_rob_idx; // @[execution-unit.scala:477:17]
wire [4:0] _FPUUnit_io_resp_bits_fflags_bits_uop_ldq_idx; // @[execution-unit.scala:477:17]
wire [4:0] _FPUUnit_io_resp_bits_fflags_bits_uop_stq_idx; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_fflags_bits_uop_rxq_idx; // @[execution-unit.scala:477:17]
wire [6:0] _FPUUnit_io_resp_bits_fflags_bits_uop_pdst; // @[execution-unit.scala:477:17]
wire [6:0] _FPUUnit_io_resp_bits_fflags_bits_uop_prs1; // @[execution-unit.scala:477:17]
wire [6:0] _FPUUnit_io_resp_bits_fflags_bits_uop_prs2; // @[execution-unit.scala:477:17]
wire [6:0] _FPUUnit_io_resp_bits_fflags_bits_uop_prs3; // @[execution-unit.scala:477:17]
wire [4:0] _FPUUnit_io_resp_bits_fflags_bits_uop_ppred; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_prs1_busy; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_prs2_busy; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_prs3_busy; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_ppred_busy; // @[execution-unit.scala:477:17]
wire [6:0] _FPUUnit_io_resp_bits_fflags_bits_uop_stale_pdst; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_exception; // @[execution-unit.scala:477:17]
wire [63:0] _FPUUnit_io_resp_bits_fflags_bits_uop_exc_cause; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_bypassable; // @[execution-unit.scala:477:17]
wire [4:0] _FPUUnit_io_resp_bits_fflags_bits_uop_mem_cmd; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_fflags_bits_uop_mem_size; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_mem_signed; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_is_fence; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_is_fencei; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_is_amo; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_uses_ldq; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_uses_stq; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_is_unique; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_flush_on_commit; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_ldst_is_rs1; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_fflags_bits_uop_ldst; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_fflags_bits_uop_lrs1; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_fflags_bits_uop_lrs2; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_fflags_bits_uop_lrs3; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_ldst_val; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_fflags_bits_uop_dst_rtype; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_fflags_bits_uop_lrs1_rtype; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_fflags_bits_uop_lrs2_rtype; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_frs3_en; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_fp_val; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_fp_single; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_xcpt_pf_if; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_xcpt_ae_if; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_xcpt_ma_if; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_bp_debug_if; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_bp_xcpt_if; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_fflags_bits_uop_debug_fsrc; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_fflags_bits_uop_debug_tsrc; // @[execution-unit.scala:477:17]
wire [4:0] _FPUUnit_io_resp_bits_fflags_bits_flags; // @[execution-unit.scala:477:17]
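  // Per-port aliases of the module inputs (`_0` suffix), emitted by the compiler for each io_* input.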
wire io_req_valid_0 = io_req_valid; // @[execution-unit.scala:437:7]
wire [6:0] io_req_bits_uop_uopc_0 = io_req_bits_uop_uopc; // @[execution-unit.scala:437:7]
wire [31:0] io_req_bits_uop_inst_0 = io_req_bits_uop_inst; // @[execution-unit.scala:437:7]
wire [31:0] io_req_bits_uop_debug_inst_0 = io_req_bits_uop_debug_inst; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_is_rvc_0 = io_req_bits_uop_is_rvc; // @[execution-unit.scala:437:7]
wire [39:0] io_req_bits_uop_debug_pc_0 = io_req_bits_uop_debug_pc; // @[execution-unit.scala:437:7]
wire [2:0] io_req_bits_uop_iq_type_0 = io_req_bits_uop_iq_type; // @[execution-unit.scala:437:7]
wire [9:0] io_req_bits_uop_fu_code_0 = io_req_bits_uop_fu_code; // @[execution-unit.scala:437:7]
wire [3:0] io_req_bits_uop_ctrl_br_type_0 = io_req_bits_uop_ctrl_br_type; // @[execution-unit.scala:437:7]
wire [1:0] io_req_bits_uop_ctrl_op1_sel_0 = io_req_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:437:7]
wire [2:0] io_req_bits_uop_ctrl_op2_sel_0 = io_req_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:437:7]
wire [2:0] io_req_bits_uop_ctrl_imm_sel_0 = io_req_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:437:7]
wire [4:0] io_req_bits_uop_ctrl_op_fcn_0 = io_req_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_ctrl_fcn_dw_0 = io_req_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:437:7]
wire [2:0] io_req_bits_uop_ctrl_csr_cmd_0 = io_req_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_ctrl_is_load_0 = io_req_bits_uop_ctrl_is_load; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_ctrl_is_sta_0 = io_req_bits_uop_ctrl_is_sta; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_ctrl_is_std_0 = io_req_bits_uop_ctrl_is_std; // @[execution-unit.scala:437:7]
wire [1:0] io_req_bits_uop_iw_state_0 = io_req_bits_uop_iw_state; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_iw_p1_poisoned_0 = io_req_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_iw_p2_poisoned_0 = io_req_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_is_br_0 = io_req_bits_uop_is_br; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_is_jalr_0 = io_req_bits_uop_is_jalr; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_is_jal_0 = io_req_bits_uop_is_jal; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_is_sfb_0 = io_req_bits_uop_is_sfb; // @[execution-unit.scala:437:7]
wire [15:0] io_req_bits_uop_br_mask_0 = io_req_bits_uop_br_mask; // @[execution-unit.scala:437:7]
wire [3:0] io_req_bits_uop_br_tag_0 = io_req_bits_uop_br_tag; // @[execution-unit.scala:437:7]
wire [4:0] io_req_bits_uop_ftq_idx_0 = io_req_bits_uop_ftq_idx; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_edge_inst_0 = io_req_bits_uop_edge_inst; // @[execution-unit.scala:437:7]
wire [5:0] io_req_bits_uop_pc_lob_0 = io_req_bits_uop_pc_lob; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_taken_0 = io_req_bits_uop_taken; // @[execution-unit.scala:437:7]
wire [19:0] io_req_bits_uop_imm_packed_0 = io_req_bits_uop_imm_packed; // @[execution-unit.scala:437:7]
wire [11:0] io_req_bits_uop_csr_addr_0 = io_req_bits_uop_csr_addr; // @[execution-unit.scala:437:7]
wire [6:0] io_req_bits_uop_rob_idx_0 = io_req_bits_uop_rob_idx; // @[execution-unit.scala:437:7]
wire [4:0] io_req_bits_uop_ldq_idx_0 = io_req_bits_uop_ldq_idx; // @[execution-unit.scala:437:7]
wire [4:0] io_req_bits_uop_stq_idx_0 = io_req_bits_uop_stq_idx; // @[execution-unit.scala:437:7]
wire [1:0] io_req_bits_uop_rxq_idx_0 = io_req_bits_uop_rxq_idx; // @[execution-unit.scala:437:7]
wire [6:0] io_req_bits_uop_pdst_0 = io_req_bits_uop_pdst; // @[execution-unit.scala:437:7]
wire [6:0] io_req_bits_uop_prs1_0 = io_req_bits_uop_prs1; // @[execution-unit.scala:437:7]
wire [6:0] io_req_bits_uop_prs2_0 = io_req_bits_uop_prs2; // @[execution-unit.scala:437:7]
wire [6:0] io_req_bits_uop_prs3_0 = io_req_bits_uop_prs3; // @[execution-unit.scala:437:7]
wire [4:0] io_req_bits_uop_ppred_0 = io_req_bits_uop_ppred; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_prs1_busy_0 = io_req_bits_uop_prs1_busy; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_prs2_busy_0 = io_req_bits_uop_prs2_busy; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_prs3_busy_0 = io_req_bits_uop_prs3_busy; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_ppred_busy_0 = io_req_bits_uop_ppred_busy; // @[execution-unit.scala:437:7]
wire [6:0] io_req_bits_uop_stale_pdst_0 = io_req_bits_uop_stale_pdst; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_exception_0 = io_req_bits_uop_exception; // @[execution-unit.scala:437:7]
wire [63:0] io_req_bits_uop_exc_cause_0 = io_req_bits_uop_exc_cause; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_bypassable_0 = io_req_bits_uop_bypassable; // @[execution-unit.scala:437:7]
wire [4:0] io_req_bits_uop_mem_cmd_0 = io_req_bits_uop_mem_cmd; // @[execution-unit.scala:437:7]
wire [1:0] io_req_bits_uop_mem_size_0 = io_req_bits_uop_mem_size; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_mem_signed_0 = io_req_bits_uop_mem_signed; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_is_fence_0 = io_req_bits_uop_is_fence; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_is_fencei_0 = io_req_bits_uop_is_fencei; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_is_amo_0 = io_req_bits_uop_is_amo; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_uses_ldq_0 = io_req_bits_uop_uses_ldq; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_uses_stq_0 = io_req_bits_uop_uses_stq; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_is_sys_pc2epc_0 = io_req_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_is_unique_0 = io_req_bits_uop_is_unique; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_flush_on_commit_0 = io_req_bits_uop_flush_on_commit; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_ldst_is_rs1_0 = io_req_bits_uop_ldst_is_rs1; // @[execution-unit.scala:437:7]
wire [5:0] io_req_bits_uop_ldst_0 = io_req_bits_uop_ldst; // @[execution-unit.scala:437:7]
wire [5:0] io_req_bits_uop_lrs1_0 = io_req_bits_uop_lrs1; // @[execution-unit.scala:437:7]
wire [5:0] io_req_bits_uop_lrs2_0 = io_req_bits_uop_lrs2; // @[execution-unit.scala:437:7]
wire [5:0] io_req_bits_uop_lrs3_0 = io_req_bits_uop_lrs3; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_ldst_val_0 = io_req_bits_uop_ldst_val; // @[execution-unit.scala:437:7]
wire [1:0] io_req_bits_uop_dst_rtype_0 = io_req_bits_uop_dst_rtype; // @[execution-unit.scala:437:7]
wire [1:0] io_req_bits_uop_lrs1_rtype_0 = io_req_bits_uop_lrs1_rtype; // @[execution-unit.scala:437:7]
wire [1:0] io_req_bits_uop_lrs2_rtype_0 = io_req_bits_uop_lrs2_rtype; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_frs3_en_0 = io_req_bits_uop_frs3_en; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_fp_val_0 = io_req_bits_uop_fp_val; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_fp_single_0 = io_req_bits_uop_fp_single; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_xcpt_pf_if_0 = io_req_bits_uop_xcpt_pf_if; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_xcpt_ae_if_0 = io_req_bits_uop_xcpt_ae_if; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_xcpt_ma_if_0 = io_req_bits_uop_xcpt_ma_if; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_bp_debug_if_0 = io_req_bits_uop_bp_debug_if; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_bp_xcpt_if_0 = io_req_bits_uop_bp_xcpt_if; // @[execution-unit.scala:437:7]
wire [1:0] io_req_bits_uop_debug_fsrc_0 = io_req_bits_uop_debug_fsrc; // @[execution-unit.scala:437:7]
wire [1:0] io_req_bits_uop_debug_tsrc_0 = io_req_bits_uop_debug_tsrc; // @[execution-unit.scala:437:7]
wire [64:0] io_req_bits_rs1_data_0 = io_req_bits_rs1_data; // @[execution-unit.scala:437:7]
wire [64:0] io_req_bits_rs2_data_0 = io_req_bits_rs2_data; // @[execution-unit.scala:437:7]
wire [64:0] io_req_bits_rs3_data_0 = io_req_bits_rs3_data; // @[execution-unit.scala:437:7]
wire io_req_bits_kill_0 = io_req_bits_kill; // @[execution-unit.scala:437:7]
wire io_ll_iresp_ready_0 = io_ll_iresp_ready; // @[execution-unit.scala:437:7]
wire [15:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[execution-unit.scala:437:7]
wire [15:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[execution-unit.scala:437:7]
wire [6:0] io_brupdate_b2_uop_uopc_0 = io_brupdate_b2_uop_uopc; // @[execution-unit.scala:437:7]
wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[execution-unit.scala:437:7]
wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[execution-unit.scala:437:7]
wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[execution-unit.scala:437:7]
wire [2:0] io_brupdate_b2_uop_iq_type_0 = io_brupdate_b2_uop_iq_type; // @[execution-unit.scala:437:7]
wire [9:0] io_brupdate_b2_uop_fu_code_0 = io_brupdate_b2_uop_fu_code; // @[execution-unit.scala:437:7]
wire [3:0] io_brupdate_b2_uop_ctrl_br_type_0 = io_brupdate_b2_uop_ctrl_br_type; // @[execution-unit.scala:437:7]
wire [1:0] io_brupdate_b2_uop_ctrl_op1_sel_0 = io_brupdate_b2_uop_ctrl_op1_sel; // @[execution-unit.scala:437:7]
wire [2:0] io_brupdate_b2_uop_ctrl_op2_sel_0 = io_brupdate_b2_uop_ctrl_op2_sel; // @[execution-unit.scala:437:7]
wire [2:0] io_brupdate_b2_uop_ctrl_imm_sel_0 = io_brupdate_b2_uop_ctrl_imm_sel; // @[execution-unit.scala:437:7]
wire [4:0] io_brupdate_b2_uop_ctrl_op_fcn_0 = io_brupdate_b2_uop_ctrl_op_fcn; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_ctrl_fcn_dw_0 = io_brupdate_b2_uop_ctrl_fcn_dw; // @[execution-unit.scala:437:7]
wire [2:0] io_brupdate_b2_uop_ctrl_csr_cmd_0 = io_brupdate_b2_uop_ctrl_csr_cmd; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_ctrl_is_load_0 = io_brupdate_b2_uop_ctrl_is_load; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_ctrl_is_sta_0 = io_brupdate_b2_uop_ctrl_is_sta; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_ctrl_is_std_0 = io_brupdate_b2_uop_ctrl_is_std; // @[execution-unit.scala:437:7]
wire [1:0] io_brupdate_b2_uop_iw_state_0 = io_brupdate_b2_uop_iw_state; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_iw_p1_poisoned_0 = io_brupdate_b2_uop_iw_p1_poisoned; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_iw_p2_poisoned_0 = io_brupdate_b2_uop_iw_p2_poisoned; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_is_br_0 = io_brupdate_b2_uop_is_br; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_is_jalr_0 = io_brupdate_b2_uop_is_jalr; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_is_jal_0 = io_brupdate_b2_uop_is_jal; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[execution-unit.scala:437:7]
wire [15:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[execution-unit.scala:437:7]
wire [3:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[execution-unit.scala:437:7]
wire [4:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[execution-unit.scala:437:7]
wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[execution-unit.scala:437:7]
wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[execution-unit.scala:437:7]
wire [11:0] io_brupdate_b2_uop_csr_addr_0 = io_brupdate_b2_uop_csr_addr; // @[execution-unit.scala:437:7]
wire [6:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[execution-unit.scala:437:7]
wire [4:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[execution-unit.scala:437:7]
wire [4:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[execution-unit.scala:437:7]
wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[execution-unit.scala:437:7]
wire [6:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[execution-unit.scala:437:7]
wire [6:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[execution-unit.scala:437:7]
wire [6:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[execution-unit.scala:437:7]
wire [6:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[execution-unit.scala:437:7]
wire [4:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[execution-unit.scala:437:7]
wire [6:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[execution-unit.scala:437:7]
wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_bypassable_0 = io_brupdate_b2_uop_bypassable; // @[execution-unit.scala:437:7]
wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[execution-unit.scala:437:7]
wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[execution-unit.scala:437:7]
wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[execution-unit.scala:437:7]
wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[execution-unit.scala:437:7]
wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[execution-unit.scala:437:7]
wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_ldst_val_0 = io_brupdate_b2_uop_ldst_val; // @[execution-unit.scala:437:7]
wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[execution-unit.scala:437:7]
wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[execution-unit.scala:437:7]
wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_fp_single_0 = io_brupdate_b2_uop_fp_single; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[execution-unit.scala:437:7]
wire [1:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[execution-unit.scala:437:7]
wire [1:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_valid_0 = io_brupdate_b2_valid; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[execution-unit.scala:437:7]
wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[execution-unit.scala:437:7]
wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[execution-unit.scala:437:7]
wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[execution-unit.scala:437:7]
wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[execution-unit.scala:437:7]
wire io_status_debug_0 = io_status_debug; // @[execution-unit.scala:437:7]
wire io_status_cease_0 = io_status_cease; // @[execution-unit.scala:437:7]
wire io_status_wfi_0 = io_status_wfi; // @[execution-unit.scala:437:7]
wire [1:0] io_status_dprv_0 = io_status_dprv; // @[execution-unit.scala:437:7]
wire io_status_dv_0 = io_status_dv; // @[execution-unit.scala:437:7]
wire [1:0] io_status_prv_0 = io_status_prv; // @[execution-unit.scala:437:7]
wire io_status_v_0 = io_status_v; // @[execution-unit.scala:437:7]
wire io_status_sd_0 = io_status_sd; // @[execution-unit.scala:437:7]
wire io_status_mpv_0 = io_status_mpv; // @[execution-unit.scala:437:7]
wire io_status_gva_0 = io_status_gva; // @[execution-unit.scala:437:7]
wire io_status_tsr_0 = io_status_tsr; // @[execution-unit.scala:437:7]
wire io_status_tw_0 = io_status_tw; // @[execution-unit.scala:437:7]
wire io_status_tvm_0 = io_status_tvm; // @[execution-unit.scala:437:7]
wire io_status_mxr_0 = io_status_mxr; // @[execution-unit.scala:437:7]
wire io_status_sum_0 = io_status_sum; // @[execution-unit.scala:437:7]
wire io_status_mprv_0 = io_status_mprv; // @[execution-unit.scala:437:7]
wire [1:0] io_status_fs_0 = io_status_fs; // @[execution-unit.scala:437:7]
wire [1:0] io_status_mpp_0 = io_status_mpp; // @[execution-unit.scala:437:7]
wire io_status_spp_0 = io_status_spp; // @[execution-unit.scala:437:7]
wire io_status_mpie_0 = io_status_mpie; // @[execution-unit.scala:437:7]
wire io_status_spie_0 = io_status_spie; // @[execution-unit.scala:437:7]
wire io_status_mie_0 = io_status_mie; // @[execution-unit.scala:437:7]
wire io_status_sie_0 = io_status_sie; // @[execution-unit.scala:437:7]
wire [2:0] io_fcsr_rm_0 = io_fcsr_rm; // @[execution-unit.scala:437:7]
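// The signals below were folded to constants at elaboration time for this
// configuration: e.g. io_req_ready is tied to 1'h0, io_fresp_ready to 1'h1,
// and the unused io_status fields (isa, zero1/zero2, xs, vs, ...) to fixed values.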
wire [31:0] io_status_isa = 32'h14112D; // @[execution-unit.scala:437:7]
wire [22:0] io_status_zero2 = 23'h0; // @[execution-unit.scala:437:7]
wire io_req_ready = 1'h0; // @[execution-unit.scala:437:7]
wire io_req_bits_pred_data = 1'h0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_predicated = 1'h0; // @[execution-unit.scala:437:7]
wire io_status_mbe = 1'h0; // @[execution-unit.scala:437:7]
wire io_status_sbe = 1'h0; // @[execution-unit.scala:437:7]
wire io_status_sd_rv32 = 1'h0; // @[execution-unit.scala:437:7]
wire io_status_ube = 1'h0; // @[execution-unit.scala:437:7]
wire io_status_upie = 1'h0; // @[execution-unit.scala:437:7]
wire io_status_hie = 1'h0; // @[execution-unit.scala:437:7]
wire io_status_uie = 1'h0; // @[execution-unit.scala:437:7]
wire [7:0] io_status_zero1 = 8'h0; // @[execution-unit.scala:437:7]
wire [1:0] io_status_xs = 2'h0; // @[execution-unit.scala:437:7]
wire [1:0] io_status_vs = 2'h0; // @[execution-unit.scala:437:7]
wire io_fresp_ready = 1'h1; // @[execution-unit.scala:437:7]
wire [1:0] io_status_sxl = 2'h2; // @[execution-unit.scala:437:7]
wire [1:0] io_status_uxl = 2'h2; // @[execution-unit.scala:437:7]
wire [9:0] _io_fu_types_T = 10'h40; // @[execution-unit.scala:467:21]
wire [9:0] _io_fu_types_T_8; // @[execution-unit.scala:468:60]
wire _io_fresp_valid_T_5; // @[execution-unit.scala:525:69]
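// The _io_fresp_bits_uop_T_* wires below hold the outputs of the response-select
// Mux (Mux.scala:50:70); they most likely choose between the pipelined FPU
// response and the fdiv/fsqrt response uop before driving io_fresp_bits_uop.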
wire [6:0] _io_fresp_bits_uop_T_uopc; // @[Mux.scala:50:70]
wire [31:0] _io_fresp_bits_uop_T_inst; // @[Mux.scala:50:70]
wire [31:0] _io_fresp_bits_uop_T_debug_inst; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_is_rvc; // @[Mux.scala:50:70]
wire [39:0] _io_fresp_bits_uop_T_debug_pc; // @[Mux.scala:50:70]
wire [2:0] _io_fresp_bits_uop_T_iq_type; // @[Mux.scala:50:70]
wire [9:0] _io_fresp_bits_uop_T_fu_code; // @[Mux.scala:50:70]
wire [3:0] _io_fresp_bits_uop_T_ctrl_br_type; // @[Mux.scala:50:70]
wire [1:0] _io_fresp_bits_uop_T_ctrl_op1_sel; // @[Mux.scala:50:70]
wire [2:0] _io_fresp_bits_uop_T_ctrl_op2_sel; // @[Mux.scala:50:70]
wire [2:0] _io_fresp_bits_uop_T_ctrl_imm_sel; // @[Mux.scala:50:70]
wire [4:0] _io_fresp_bits_uop_T_ctrl_op_fcn; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_ctrl_fcn_dw; // @[Mux.scala:50:70]
wire [2:0] _io_fresp_bits_uop_T_ctrl_csr_cmd; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_ctrl_is_load; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_ctrl_is_sta; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_ctrl_is_std; // @[Mux.scala:50:70]
wire [1:0] _io_fresp_bits_uop_T_iw_state; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_iw_p1_poisoned; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_iw_p2_poisoned; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_is_br; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_is_jalr; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_is_jal; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_is_sfb; // @[Mux.scala:50:70]
wire [15:0] _io_fresp_bits_uop_T_br_mask; // @[Mux.scala:50:70]
wire [3:0] _io_fresp_bits_uop_T_br_tag; // @[Mux.scala:50:70]
wire [4:0] _io_fresp_bits_uop_T_ftq_idx; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_edge_inst; // @[Mux.scala:50:70]
wire [5:0] _io_fresp_bits_uop_T_pc_lob; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_taken; // @[Mux.scala:50:70]
wire [19:0] _io_fresp_bits_uop_T_imm_packed; // @[Mux.scala:50:70]
wire [11:0] _io_fresp_bits_uop_T_csr_addr; // @[Mux.scala:50:70]
wire [6:0] _io_fresp_bits_uop_T_rob_idx; // @[Mux.scala:50:70]
wire [4:0] _io_fresp_bits_uop_T_ldq_idx; // @[Mux.scala:50:70]
wire [4:0] _io_fresp_bits_uop_T_stq_idx; // @[Mux.scala:50:70]
wire [1:0] _io_fresp_bits_uop_T_rxq_idx; // @[Mux.scala:50:70]
wire [6:0] _io_fresp_bits_uop_T_pdst; // @[Mux.scala:50:70]
wire [6:0] _io_fresp_bits_uop_T_prs1; // @[Mux.scala:50:70]
wire [6:0] _io_fresp_bits_uop_T_prs2; // @[Mux.scala:50:70]
wire [6:0] _io_fresp_bits_uop_T_prs3; // @[Mux.scala:50:70]
wire [4:0] _io_fresp_bits_uop_T_ppred; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_prs1_busy; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_prs2_busy; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_prs3_busy; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_ppred_busy; // @[Mux.scala:50:70]
wire [6:0] _io_fresp_bits_uop_T_stale_pdst; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_exception; // @[Mux.scala:50:70]
wire [63:0] _io_fresp_bits_uop_T_exc_cause; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_bypassable; // @[Mux.scala:50:70]
wire [4:0] _io_fresp_bits_uop_T_mem_cmd; // @[Mux.scala:50:70]
wire [1:0] _io_fresp_bits_uop_T_mem_size; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_mem_signed; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_is_fence; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_is_fencei; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_is_amo; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_uses_ldq; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_uses_stq; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_is_sys_pc2epc; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_is_unique; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_flush_on_commit; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_ldst_is_rs1; // @[Mux.scala:50:70]
wire [5:0] _io_fresp_bits_uop_T_ldst; // @[Mux.scala:50:70]
wire [5:0] _io_fresp_bits_uop_T_lrs1; // @[Mux.scala:50:70]
wire [5:0] _io_fresp_bits_uop_T_lrs2; // @[Mux.scala:50:70]
wire [5:0] _io_fresp_bits_uop_T_lrs3; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_ldst_val; // @[Mux.scala:50:70]
wire [1:0] _io_fresp_bits_uop_T_dst_rtype; // @[Mux.scala:50:70]
wire [1:0] _io_fresp_bits_uop_T_lrs1_rtype; // @[Mux.scala:50:70]
wire [1:0] _io_fresp_bits_uop_T_lrs2_rtype; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_frs3_en; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_fp_val; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_fp_single; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_xcpt_pf_if; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_xcpt_ae_if; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_xcpt_ma_if; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_bp_debug_if; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_bp_xcpt_if; // @[Mux.scala:50:70]
wire [1:0] _io_fresp_bits_uop_T_debug_fsrc; // @[Mux.scala:50:70]
wire [1:0] _io_fresp_bits_uop_T_debug_tsrc; // @[Mux.scala:50:70]
wire [64:0] _io_fresp_bits_data_T; // @[Mux.scala:50:70]
wire _io_fresp_bits_fflags_T_valid; // @[execution-unit.scala:530:30]
wire [6:0] _io_fresp_bits_fflags_T_bits_uop_uopc; // @[execution-unit.scala:530:30]
wire [31:0] _io_fresp_bits_fflags_T_bits_uop_inst; // @[execution-unit.scala:530:30]
wire [31:0] _io_fresp_bits_fflags_T_bits_uop_debug_inst; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_is_rvc; // @[execution-unit.scala:530:30]
wire [39:0] _io_fresp_bits_fflags_T_bits_uop_debug_pc; // @[execution-unit.scala:530:30]
wire [2:0] _io_fresp_bits_fflags_T_bits_uop_iq_type; // @[execution-unit.scala:530:30]
wire [9:0] _io_fresp_bits_fflags_T_bits_uop_fu_code; // @[execution-unit.scala:530:30]
wire [3:0] _io_fresp_bits_fflags_T_bits_uop_ctrl_br_type; // @[execution-unit.scala:530:30]
wire [1:0] _io_fresp_bits_fflags_T_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:530:30]
wire [2:0] _io_fresp_bits_fflags_T_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:530:30]
wire [2:0] _io_fresp_bits_fflags_T_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:530:30]
wire [4:0] _io_fresp_bits_fflags_T_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:530:30]
wire [2:0] _io_fresp_bits_fflags_T_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_ctrl_is_load; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_ctrl_is_sta; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_ctrl_is_std; // @[execution-unit.scala:530:30]
wire [1:0] _io_fresp_bits_fflags_T_bits_uop_iw_state; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_is_br; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_is_jalr; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_is_jal; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_is_sfb; // @[execution-unit.scala:530:30]
wire [15:0] _io_fresp_bits_fflags_T_bits_uop_br_mask; // @[execution-unit.scala:530:30]
wire [3:0] _io_fresp_bits_fflags_T_bits_uop_br_tag; // @[execution-unit.scala:530:30]
wire [4:0] _io_fresp_bits_fflags_T_bits_uop_ftq_idx; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_edge_inst; // @[execution-unit.scala:530:30]
wire [5:0] _io_fresp_bits_fflags_T_bits_uop_pc_lob; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_taken; // @[execution-unit.scala:530:30]
wire [19:0] _io_fresp_bits_fflags_T_bits_uop_imm_packed; // @[execution-unit.scala:530:30]
wire [11:0] _io_fresp_bits_fflags_T_bits_uop_csr_addr; // @[execution-unit.scala:530:30]
wire [6:0] _io_fresp_bits_fflags_T_bits_uop_rob_idx; // @[execution-unit.scala:530:30]
wire [4:0] _io_fresp_bits_fflags_T_bits_uop_ldq_idx; // @[execution-unit.scala:530:30]
wire [4:0] _io_fresp_bits_fflags_T_bits_uop_stq_idx; // @[execution-unit.scala:530:30]
wire [1:0] _io_fresp_bits_fflags_T_bits_uop_rxq_idx; // @[execution-unit.scala:530:30]
wire [6:0] _io_fresp_bits_fflags_T_bits_uop_pdst; // @[execution-unit.scala:530:30]
wire [6:0] _io_fresp_bits_fflags_T_bits_uop_prs1; // @[execution-unit.scala:530:30]
wire [6:0] _io_fresp_bits_fflags_T_bits_uop_prs2; // @[execution-unit.scala:530:30]
wire [6:0] _io_fresp_bits_fflags_T_bits_uop_prs3; // @[execution-unit.scala:530:30]
wire [4:0] _io_fresp_bits_fflags_T_bits_uop_ppred; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_prs1_busy; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_prs2_busy; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_prs3_busy; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_ppred_busy; // @[execution-unit.scala:530:30]
wire [6:0] _io_fresp_bits_fflags_T_bits_uop_stale_pdst; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_exception; // @[execution-unit.scala:530:30]
wire [63:0] _io_fresp_bits_fflags_T_bits_uop_exc_cause; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_bypassable; // @[execution-unit.scala:530:30]
wire [4:0] _io_fresp_bits_fflags_T_bits_uop_mem_cmd; // @[execution-unit.scala:530:30]
wire [1:0] _io_fresp_bits_fflags_T_bits_uop_mem_size; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_mem_signed; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_is_fence; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_is_fencei; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_is_amo; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_uses_ldq; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_uses_stq; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_is_unique; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_flush_on_commit; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_ldst_is_rs1; // @[execution-unit.scala:530:30]
wire [5:0] _io_fresp_bits_fflags_T_bits_uop_ldst; // @[execution-unit.scala:530:30]
wire [5:0] _io_fresp_bits_fflags_T_bits_uop_lrs1; // @[execution-unit.scala:530:30]
wire [5:0] _io_fresp_bits_fflags_T_bits_uop_lrs2; // @[execution-unit.scala:530:30]
wire [5:0] _io_fresp_bits_fflags_T_bits_uop_lrs3; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_ldst_val; // @[execution-unit.scala:530:30]
wire [1:0] _io_fresp_bits_fflags_T_bits_uop_dst_rtype; // @[execution-unit.scala:530:30]
wire [1:0] _io_fresp_bits_fflags_T_bits_uop_lrs1_rtype; // @[execution-unit.scala:530:30]
wire [1:0] _io_fresp_bits_fflags_T_bits_uop_lrs2_rtype; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_frs3_en; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_fp_val; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_fp_single; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_xcpt_pf_if; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_xcpt_ae_if; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_xcpt_ma_if; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_bp_debug_if; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_bp_xcpt_if; // @[execution-unit.scala:530:30]
wire [1:0] _io_fresp_bits_fflags_T_bits_uop_debug_fsrc; // @[execution-unit.scala:530:30]
wire [1:0] _io_fresp_bits_fflags_T_bits_uop_debug_tsrc; // @[execution-unit.scala:530:30]
wire [4:0] _io_fresp_bits_fflags_T_bits_flags; // @[execution-unit.scala:530:30]
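// Flattened forward declarations of the io_fresp and io_fresp_bits_fflags output
// fields; each is driven later in the module from the selected functional-unit response.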
wire [3:0] io_fresp_bits_uop_ctrl_br_type_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_uop_ctrl_op1_sel_0; // @[execution-unit.scala:437:7]
wire [2:0] io_fresp_bits_uop_ctrl_op2_sel_0; // @[execution-unit.scala:437:7]
wire [2:0] io_fresp_bits_uop_ctrl_imm_sel_0; // @[execution-unit.scala:437:7]
wire [4:0] io_fresp_bits_uop_ctrl_op_fcn_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_ctrl_fcn_dw_0; // @[execution-unit.scala:437:7]
wire [2:0] io_fresp_bits_uop_ctrl_csr_cmd_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_ctrl_is_load_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_ctrl_is_sta_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_ctrl_is_std_0; // @[execution-unit.scala:437:7]
wire [6:0] io_fresp_bits_uop_uopc_0; // @[execution-unit.scala:437:7]
wire [31:0] io_fresp_bits_uop_inst_0; // @[execution-unit.scala:437:7]
wire [31:0] io_fresp_bits_uop_debug_inst_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_is_rvc_0; // @[execution-unit.scala:437:7]
wire [39:0] io_fresp_bits_uop_debug_pc_0; // @[execution-unit.scala:437:7]
wire [2:0] io_fresp_bits_uop_iq_type_0; // @[execution-unit.scala:437:7]
wire [9:0] io_fresp_bits_uop_fu_code_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_uop_iw_state_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_iw_p1_poisoned_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_iw_p2_poisoned_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_is_br_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_is_jalr_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_is_jal_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_is_sfb_0; // @[execution-unit.scala:437:7]
wire [15:0] io_fresp_bits_uop_br_mask_0; // @[execution-unit.scala:437:7]
wire [3:0] io_fresp_bits_uop_br_tag_0; // @[execution-unit.scala:437:7]
wire [4:0] io_fresp_bits_uop_ftq_idx_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_edge_inst_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_uop_pc_lob_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_taken_0; // @[execution-unit.scala:437:7]
wire [19:0] io_fresp_bits_uop_imm_packed_0; // @[execution-unit.scala:437:7]
wire [11:0] io_fresp_bits_uop_csr_addr_0; // @[execution-unit.scala:437:7]
wire [6:0] io_fresp_bits_uop_rob_idx_0; // @[execution-unit.scala:437:7]
wire [4:0] io_fresp_bits_uop_ldq_idx_0; // @[execution-unit.scala:437:7]
wire [4:0] io_fresp_bits_uop_stq_idx_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_uop_rxq_idx_0; // @[execution-unit.scala:437:7]
wire [6:0] io_fresp_bits_uop_pdst_0; // @[execution-unit.scala:437:7]
wire [6:0] io_fresp_bits_uop_prs1_0; // @[execution-unit.scala:437:7]
wire [6:0] io_fresp_bits_uop_prs2_0; // @[execution-unit.scala:437:7]
wire [6:0] io_fresp_bits_uop_prs3_0; // @[execution-unit.scala:437:7]
wire [4:0] io_fresp_bits_uop_ppred_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_prs1_busy_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_prs2_busy_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_prs3_busy_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_ppred_busy_0; // @[execution-unit.scala:437:7]
wire [6:0] io_fresp_bits_uop_stale_pdst_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_exception_0; // @[execution-unit.scala:437:7]
wire [63:0] io_fresp_bits_uop_exc_cause_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_bypassable_0; // @[execution-unit.scala:437:7]
wire [4:0] io_fresp_bits_uop_mem_cmd_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_uop_mem_size_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_mem_signed_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_is_fence_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_is_fencei_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_is_amo_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_uses_ldq_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_uses_stq_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_is_sys_pc2epc_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_is_unique_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_flush_on_commit_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_ldst_is_rs1_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_uop_ldst_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_uop_lrs1_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_uop_lrs2_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_uop_lrs3_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_ldst_val_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_uop_dst_rtype_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_uop_lrs1_rtype_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_uop_lrs2_rtype_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_frs3_en_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_fp_val_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_fp_single_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_xcpt_pf_if_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_xcpt_ae_if_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_xcpt_ma_if_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_bp_debug_if_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_bp_xcpt_if_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_uop_debug_fsrc_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_uop_debug_tsrc_0; // @[execution-unit.scala:437:7]
wire [3:0] io_fresp_bits_fflags_bits_uop_ctrl_br_type_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_fflags_bits_uop_ctrl_op1_sel_0; // @[execution-unit.scala:437:7]
wire [2:0] io_fresp_bits_fflags_bits_uop_ctrl_op2_sel_0; // @[execution-unit.scala:437:7]
wire [2:0] io_fresp_bits_fflags_bits_uop_ctrl_imm_sel_0; // @[execution-unit.scala:437:7]
wire [4:0] io_fresp_bits_fflags_bits_uop_ctrl_op_fcn_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_ctrl_fcn_dw_0; // @[execution-unit.scala:437:7]
wire [2:0] io_fresp_bits_fflags_bits_uop_ctrl_csr_cmd_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_ctrl_is_load_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_ctrl_is_sta_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_ctrl_is_std_0; // @[execution-unit.scala:437:7]
wire [6:0] io_fresp_bits_fflags_bits_uop_uopc_0; // @[execution-unit.scala:437:7]
wire [31:0] io_fresp_bits_fflags_bits_uop_inst_0; // @[execution-unit.scala:437:7]
wire [31:0] io_fresp_bits_fflags_bits_uop_debug_inst_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_is_rvc_0; // @[execution-unit.scala:437:7]
wire [39:0] io_fresp_bits_fflags_bits_uop_debug_pc_0; // @[execution-unit.scala:437:7]
wire [2:0] io_fresp_bits_fflags_bits_uop_iq_type_0; // @[execution-unit.scala:437:7]
wire [9:0] io_fresp_bits_fflags_bits_uop_fu_code_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_fflags_bits_uop_iw_state_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_iw_p1_poisoned_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_iw_p2_poisoned_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_is_br_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_is_jalr_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_is_jal_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_is_sfb_0; // @[execution-unit.scala:437:7]
wire [15:0] io_fresp_bits_fflags_bits_uop_br_mask_0; // @[execution-unit.scala:437:7]
wire [3:0] io_fresp_bits_fflags_bits_uop_br_tag_0; // @[execution-unit.scala:437:7]
wire [4:0] io_fresp_bits_fflags_bits_uop_ftq_idx_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_edge_inst_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_fflags_bits_uop_pc_lob_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_taken_0; // @[execution-unit.scala:437:7]
wire [19:0] io_fresp_bits_fflags_bits_uop_imm_packed_0; // @[execution-unit.scala:437:7]
wire [11:0] io_fresp_bits_fflags_bits_uop_csr_addr_0; // @[execution-unit.scala:437:7]
wire [6:0] io_fresp_bits_fflags_bits_uop_rob_idx_0; // @[execution-unit.scala:437:7]
wire [4:0] io_fresp_bits_fflags_bits_uop_ldq_idx_0; // @[execution-unit.scala:437:7]
wire [4:0] io_fresp_bits_fflags_bits_uop_stq_idx_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_fflags_bits_uop_rxq_idx_0; // @[execution-unit.scala:437:7]
wire [6:0] io_fresp_bits_fflags_bits_uop_pdst_0; // @[execution-unit.scala:437:7]
wire [6:0] io_fresp_bits_fflags_bits_uop_prs1_0; // @[execution-unit.scala:437:7]
wire [6:0] io_fresp_bits_fflags_bits_uop_prs2_0; // @[execution-unit.scala:437:7]
wire [6:0] io_fresp_bits_fflags_bits_uop_prs3_0; // @[execution-unit.scala:437:7]
wire [4:0] io_fresp_bits_fflags_bits_uop_ppred_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_prs1_busy_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_prs2_busy_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_prs3_busy_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_ppred_busy_0; // @[execution-unit.scala:437:7]
wire [6:0] io_fresp_bits_fflags_bits_uop_stale_pdst_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_exception_0; // @[execution-unit.scala:437:7]
wire [63:0] io_fresp_bits_fflags_bits_uop_exc_cause_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_bypassable_0; // @[execution-unit.scala:437:7]
wire [4:0] io_fresp_bits_fflags_bits_uop_mem_cmd_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_fflags_bits_uop_mem_size_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_mem_signed_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_is_fence_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_is_fencei_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_is_amo_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_uses_ldq_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_uses_stq_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_is_sys_pc2epc_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_is_unique_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_flush_on_commit_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_ldst_is_rs1_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_fflags_bits_uop_ldst_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_fflags_bits_uop_lrs1_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_fflags_bits_uop_lrs2_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_fflags_bits_uop_lrs3_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_ldst_val_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_fflags_bits_uop_dst_rtype_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_fflags_bits_uop_lrs1_rtype_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_fflags_bits_uop_lrs2_rtype_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_frs3_en_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_fp_val_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_fp_single_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_xcpt_pf_if_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_xcpt_ae_if_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_xcpt_ma_if_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_bp_debug_if_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_bp_xcpt_if_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_fflags_bits_uop_debug_fsrc_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_fflags_bits_uop_debug_tsrc_0; // @[execution-unit.scala:437:7]
wire [4:0] io_fresp_bits_fflags_bits_flags_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_valid_0; // @[execution-unit.scala:437:7]
wire [64:0] io_fresp_bits_data_0; // @[execution-unit.scala:437:7]
wire io_fresp_valid_0; // @[execution-unit.scala:437:7]
wire [3:0] io_ll_iresp_bits_uop_ctrl_br_type_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_uop_ctrl_op1_sel_0; // @[execution-unit.scala:437:7]
wire [2:0] io_ll_iresp_bits_uop_ctrl_op2_sel_0; // @[execution-unit.scala:437:7]
wire [2:0] io_ll_iresp_bits_uop_ctrl_imm_sel_0; // @[execution-unit.scala:437:7]
wire [4:0] io_ll_iresp_bits_uop_ctrl_op_fcn_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_ctrl_fcn_dw_0; // @[execution-unit.scala:437:7]
wire [2:0] io_ll_iresp_bits_uop_ctrl_csr_cmd_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_ctrl_is_load_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_ctrl_is_sta_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_ctrl_is_std_0; // @[execution-unit.scala:437:7]
wire [6:0] io_ll_iresp_bits_uop_uopc_0; // @[execution-unit.scala:437:7]
wire [31:0] io_ll_iresp_bits_uop_inst_0; // @[execution-unit.scala:437:7]
wire [31:0] io_ll_iresp_bits_uop_debug_inst_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_is_rvc_0; // @[execution-unit.scala:437:7]
wire [39:0] io_ll_iresp_bits_uop_debug_pc_0; // @[execution-unit.scala:437:7]
wire [2:0] io_ll_iresp_bits_uop_iq_type_0; // @[execution-unit.scala:437:7]
wire [9:0] io_ll_iresp_bits_uop_fu_code_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_uop_iw_state_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_iw_p1_poisoned_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_iw_p2_poisoned_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_is_br_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_is_jalr_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_is_jal_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_is_sfb_0; // @[execution-unit.scala:437:7]
wire [15:0] io_ll_iresp_bits_uop_br_mask_0; // @[execution-unit.scala:437:7]
wire [3:0] io_ll_iresp_bits_uop_br_tag_0; // @[execution-unit.scala:437:7]
wire [4:0] io_ll_iresp_bits_uop_ftq_idx_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_edge_inst_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_uop_pc_lob_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_taken_0; // @[execution-unit.scala:437:7]
wire [19:0] io_ll_iresp_bits_uop_imm_packed_0; // @[execution-unit.scala:437:7]
wire [11:0] io_ll_iresp_bits_uop_csr_addr_0; // @[execution-unit.scala:437:7]
wire [6:0] io_ll_iresp_bits_uop_rob_idx_0; // @[execution-unit.scala:437:7]
wire [4:0] io_ll_iresp_bits_uop_ldq_idx_0; // @[execution-unit.scala:437:7]
wire [4:0] io_ll_iresp_bits_uop_stq_idx_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_uop_rxq_idx_0; // @[execution-unit.scala:437:7]
wire [6:0] io_ll_iresp_bits_uop_pdst_0; // @[execution-unit.scala:437:7]
wire [6:0] io_ll_iresp_bits_uop_prs1_0; // @[execution-unit.scala:437:7]
wire [6:0] io_ll_iresp_bits_uop_prs2_0; // @[execution-unit.scala:437:7]
wire [6:0] io_ll_iresp_bits_uop_prs3_0; // @[execution-unit.scala:437:7]
wire [4:0] io_ll_iresp_bits_uop_ppred_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_prs1_busy_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_prs2_busy_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_prs3_busy_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_ppred_busy_0; // @[execution-unit.scala:437:7]
wire [6:0] io_ll_iresp_bits_uop_stale_pdst_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_exception_0; // @[execution-unit.scala:437:7]
wire [63:0] io_ll_iresp_bits_uop_exc_cause_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_bypassable_0; // @[execution-unit.scala:437:7]
wire [4:0] io_ll_iresp_bits_uop_mem_cmd_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_uop_mem_size_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_mem_signed_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_is_fence_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_is_fencei_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_is_amo_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_uses_ldq_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_uses_stq_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_is_sys_pc2epc_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_is_unique_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_flush_on_commit_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_ldst_is_rs1_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_uop_ldst_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_uop_lrs1_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_uop_lrs2_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_uop_lrs3_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_ldst_val_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_uop_dst_rtype_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_uop_lrs1_rtype_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_uop_lrs2_rtype_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_frs3_en_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_fp_val_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_fp_single_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_xcpt_pf_if_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_xcpt_ae_if_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_xcpt_ma_if_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_bp_debug_if_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_bp_xcpt_if_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_uop_debug_fsrc_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_uop_debug_tsrc_0; // @[execution-unit.scala:437:7]
wire [3:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_br_type_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_op1_sel_0; // @[execution-unit.scala:437:7]
wire [2:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_op2_sel_0; // @[execution-unit.scala:437:7]
wire [2:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_imm_sel_0; // @[execution-unit.scala:437:7]
wire [4:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_op_fcn_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_ctrl_fcn_dw_0; // @[execution-unit.scala:437:7]
wire [2:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_csr_cmd_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_ctrl_is_load_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_ctrl_is_sta_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_ctrl_is_std_0; // @[execution-unit.scala:437:7]
wire [6:0] io_ll_iresp_bits_fflags_bits_uop_uopc_0; // @[execution-unit.scala:437:7]
wire [31:0] io_ll_iresp_bits_fflags_bits_uop_inst_0; // @[execution-unit.scala:437:7]
wire [31:0] io_ll_iresp_bits_fflags_bits_uop_debug_inst_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_is_rvc_0; // @[execution-unit.scala:437:7]
wire [39:0] io_ll_iresp_bits_fflags_bits_uop_debug_pc_0; // @[execution-unit.scala:437:7]
wire [2:0] io_ll_iresp_bits_fflags_bits_uop_iq_type_0; // @[execution-unit.scala:437:7]
wire [9:0] io_ll_iresp_bits_fflags_bits_uop_fu_code_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_fflags_bits_uop_iw_state_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_iw_p1_poisoned_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_iw_p2_poisoned_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_is_br_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_is_jalr_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_is_jal_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_is_sfb_0; // @[execution-unit.scala:437:7]
wire [15:0] io_ll_iresp_bits_fflags_bits_uop_br_mask_0; // @[execution-unit.scala:437:7]
wire [3:0] io_ll_iresp_bits_fflags_bits_uop_br_tag_0; // @[execution-unit.scala:437:7]
wire [4:0] io_ll_iresp_bits_fflags_bits_uop_ftq_idx_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_edge_inst_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_fflags_bits_uop_pc_lob_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_taken_0; // @[execution-unit.scala:437:7]
wire [19:0] io_ll_iresp_bits_fflags_bits_uop_imm_packed_0; // @[execution-unit.scala:437:7]
wire [11:0] io_ll_iresp_bits_fflags_bits_uop_csr_addr_0; // @[execution-unit.scala:437:7]
wire [6:0] io_ll_iresp_bits_fflags_bits_uop_rob_idx_0; // @[execution-unit.scala:437:7]
wire [4:0] io_ll_iresp_bits_fflags_bits_uop_ldq_idx_0; // @[execution-unit.scala:437:7]
wire [4:0] io_ll_iresp_bits_fflags_bits_uop_stq_idx_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_fflags_bits_uop_rxq_idx_0; // @[execution-unit.scala:437:7]
wire [6:0] io_ll_iresp_bits_fflags_bits_uop_pdst_0; // @[execution-unit.scala:437:7]
wire [6:0] io_ll_iresp_bits_fflags_bits_uop_prs1_0; // @[execution-unit.scala:437:7]
wire [6:0] io_ll_iresp_bits_fflags_bits_uop_prs2_0; // @[execution-unit.scala:437:7]
wire [6:0] io_ll_iresp_bits_fflags_bits_uop_prs3_0; // @[execution-unit.scala:437:7]
wire [4:0] io_ll_iresp_bits_fflags_bits_uop_ppred_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_prs1_busy_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_prs2_busy_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_prs3_busy_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_ppred_busy_0; // @[execution-unit.scala:437:7]
wire [6:0] io_ll_iresp_bits_fflags_bits_uop_stale_pdst_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_exception_0; // @[execution-unit.scala:437:7]
wire [63:0] io_ll_iresp_bits_fflags_bits_uop_exc_cause_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_bypassable_0; // @[execution-unit.scala:437:7]
wire [4:0] io_ll_iresp_bits_fflags_bits_uop_mem_cmd_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_fflags_bits_uop_mem_size_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_mem_signed_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_is_fence_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_is_fencei_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_is_amo_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_uses_ldq_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_uses_stq_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_is_sys_pc2epc_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_is_unique_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_flush_on_commit_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_ldst_is_rs1_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_fflags_bits_uop_ldst_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_fflags_bits_uop_lrs1_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_fflags_bits_uop_lrs2_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_fflags_bits_uop_lrs3_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_ldst_val_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_fflags_bits_uop_dst_rtype_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_fflags_bits_uop_lrs1_rtype_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_fflags_bits_uop_lrs2_rtype_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_frs3_en_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_fp_val_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_fp_single_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_xcpt_pf_if_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_xcpt_ae_if_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_xcpt_ma_if_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_bp_debug_if_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_bp_xcpt_if_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_fflags_bits_uop_debug_fsrc_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_fflags_bits_uop_debug_tsrc_0; // @[execution-unit.scala:437:7]
wire [4:0] io_ll_iresp_bits_fflags_bits_flags_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_valid_0; // @[execution-unit.scala:437:7]
wire [64:0] io_ll_iresp_bits_data_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_predicated_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_valid_0; // @[execution-unit.scala:437:7]
wire [9:0] io_fu_types_0; // @[execution-unit.scala:437:7]
wire _fdiv_busy_T_4; // @[execution-unit.scala:516:41]
wire fdiv_busy; // @[execution-unit.scala:461:27]
wire _fpiu_busy_T_1; // @[execution-unit.scala:568:18]
wire fpiu_busy; // @[execution-unit.scala:462:27]
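// io_fu_types advertises which functional units can currently accept a request:
// bit 6 (10'h40) is always set, bit 7 is set only while the fdiv/fsqrt unit is
// not busy, and bit 9 only while the FP-to-int (fpiu) path is not busy. The bit
// positions appear to follow BOOM's one-hot FU_* encoding (FU_FPU, FU_FDV, FU_F2I).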
wire _io_fu_types_T_1 = ~fdiv_busy; // @[execution-unit.scala:461:27, :468:22]
wire _io_fu_types_T_2 = _io_fu_types_T_1; // @[execution-unit.scala:468:{22,33}]
wire [9:0] _io_fu_types_T_3 = {2'h0, _io_fu_types_T_2, 7'h0}; // @[execution-unit.scala:468:{21,33}]
wire [9:0] _io_fu_types_T_4 = _io_fu_types_T_3 | 10'h40; // @[execution-unit.scala:467:45, :468:21]
wire _io_fu_types_T_5 = ~fpiu_busy; // @[execution-unit.scala:462:27, :469:22]
wire _io_fu_types_T_6 = _io_fu_types_T_5; // @[execution-unit.scala:469:{22,33}]
wire [9:0] _io_fu_types_T_7 = {_io_fu_types_T_6, 9'h0}; // @[execution-unit.scala:469:{21,33}]
assign _io_fu_types_T_8 = _io_fu_types_T_4 | _io_fu_types_T_7; // @[execution-unit.scala:467:45, :468:60, :469:21]
assign io_fu_types_0 = _io_fu_types_T_8; // @[execution-unit.scala:437:7, :468:60]
wire fpu_resp_val; // @[execution-unit.scala:473:30]
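// Flattened fields of the pipelined FPU's fflags response bundle
// (execution-unit.scala:474:29), later muxed into io_fresp_bits_fflags.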
wire [3:0] fpu_resp_fflags_bits_uop_ctrl_br_type; // @[execution-unit.scala:474:29]
wire [1:0] fpu_resp_fflags_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:474:29]
wire [2:0] fpu_resp_fflags_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:474:29]
wire [2:0] fpu_resp_fflags_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:474:29]
wire [4:0] fpu_resp_fflags_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:474:29]
wire [2:0] fpu_resp_fflags_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_ctrl_is_load; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_ctrl_is_sta; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_ctrl_is_std; // @[execution-unit.scala:474:29]
wire [6:0] fpu_resp_fflags_bits_uop_uopc; // @[execution-unit.scala:474:29]
wire [31:0] fpu_resp_fflags_bits_uop_inst; // @[execution-unit.scala:474:29]
wire [31:0] fpu_resp_fflags_bits_uop_debug_inst; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_is_rvc; // @[execution-unit.scala:474:29]
wire [39:0] fpu_resp_fflags_bits_uop_debug_pc; // @[execution-unit.scala:474:29]
wire [2:0] fpu_resp_fflags_bits_uop_iq_type; // @[execution-unit.scala:474:29]
wire [9:0] fpu_resp_fflags_bits_uop_fu_code; // @[execution-unit.scala:474:29]
wire [1:0] fpu_resp_fflags_bits_uop_iw_state; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_is_br; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_is_jalr; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_is_jal; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_is_sfb; // @[execution-unit.scala:474:29]
wire [15:0] fpu_resp_fflags_bits_uop_br_mask; // @[execution-unit.scala:474:29]
wire [3:0] fpu_resp_fflags_bits_uop_br_tag; // @[execution-unit.scala:474:29]
wire [4:0] fpu_resp_fflags_bits_uop_ftq_idx; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_edge_inst; // @[execution-unit.scala:474:29]
wire [5:0] fpu_resp_fflags_bits_uop_pc_lob; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_taken; // @[execution-unit.scala:474:29]
wire [19:0] fpu_resp_fflags_bits_uop_imm_packed; // @[execution-unit.scala:474:29]
wire [11:0] fpu_resp_fflags_bits_uop_csr_addr; // @[execution-unit.scala:474:29]
wire [6:0] fpu_resp_fflags_bits_uop_rob_idx; // @[execution-unit.scala:474:29]
wire [4:0] fpu_resp_fflags_bits_uop_ldq_idx; // @[execution-unit.scala:474:29]
wire [4:0] fpu_resp_fflags_bits_uop_stq_idx; // @[execution-unit.scala:474:29]
wire [1:0] fpu_resp_fflags_bits_uop_rxq_idx; // @[execution-unit.scala:474:29]
wire [6:0] fpu_resp_fflags_bits_uop_pdst; // @[execution-unit.scala:474:29]
wire [6:0] fpu_resp_fflags_bits_uop_prs1; // @[execution-unit.scala:474:29]
wire [6:0] fpu_resp_fflags_bits_uop_prs2; // @[execution-unit.scala:474:29]
wire [6:0] fpu_resp_fflags_bits_uop_prs3; // @[execution-unit.scala:474:29]
wire [4:0] fpu_resp_fflags_bits_uop_ppred; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_prs1_busy; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_prs2_busy; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_prs3_busy; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_ppred_busy; // @[execution-unit.scala:474:29]
wire [6:0] fpu_resp_fflags_bits_uop_stale_pdst; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_exception; // @[execution-unit.scala:474:29]
wire [63:0] fpu_resp_fflags_bits_uop_exc_cause; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_bypassable; // @[execution-unit.scala:474:29]
wire [4:0] fpu_resp_fflags_bits_uop_mem_cmd; // @[execution-unit.scala:474:29]
wire [1:0] fpu_resp_fflags_bits_uop_mem_size; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_mem_signed; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_is_fence; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_is_fencei; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_is_amo; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_uses_ldq; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_uses_stq; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_is_unique; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_flush_on_commit; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_ldst_is_rs1; // @[execution-unit.scala:474:29]
wire [5:0] fpu_resp_fflags_bits_uop_ldst; // @[execution-unit.scala:474:29]
wire [5:0] fpu_resp_fflags_bits_uop_lrs1; // @[execution-unit.scala:474:29]
wire [5:0] fpu_resp_fflags_bits_uop_lrs2; // @[execution-unit.scala:474:29]
wire [5:0] fpu_resp_fflags_bits_uop_lrs3; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_ldst_val; // @[execution-unit.scala:474:29]
wire [1:0] fpu_resp_fflags_bits_uop_dst_rtype; // @[execution-unit.scala:474:29]
wire [1:0] fpu_resp_fflags_bits_uop_lrs1_rtype; // @[execution-unit.scala:474:29]
wire [1:0] fpu_resp_fflags_bits_uop_lrs2_rtype; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_frs3_en; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_fp_val; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_fp_single; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_xcpt_pf_if; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_xcpt_ae_if; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_xcpt_ma_if; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_bp_debug_if; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_bp_xcpt_if; // @[execution-unit.scala:474:29]
wire [1:0] fpu_resp_fflags_bits_uop_debug_fsrc; // @[execution-unit.scala:474:29]
wire [1:0] fpu_resp_fflags_bits_uop_debug_tsrc; // @[execution-unit.scala:474:29]
wire [4:0] fpu_resp_fflags_bits_flags; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_valid; // @[execution-unit.scala:474:29]
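// Flattened fields of the fdiv/fsqrt unit's fflags response bundle
// (execution-unit.scala:498:30).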
wire [3:0] fdiv_resp_fflags_bits_uop_ctrl_br_type; // @[execution-unit.scala:498:30]
wire [1:0] fdiv_resp_fflags_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:498:30]
wire [2:0] fdiv_resp_fflags_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:498:30]
wire [2:0] fdiv_resp_fflags_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:498:30]
wire [4:0] fdiv_resp_fflags_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:498:30]
wire [2:0] fdiv_resp_fflags_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_ctrl_is_load; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_ctrl_is_sta; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_ctrl_is_std; // @[execution-unit.scala:498:30]
wire [6:0] fdiv_resp_fflags_bits_uop_uopc; // @[execution-unit.scala:498:30]
wire [31:0] fdiv_resp_fflags_bits_uop_inst; // @[execution-unit.scala:498:30]
wire [31:0] fdiv_resp_fflags_bits_uop_debug_inst; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_is_rvc; // @[execution-unit.scala:498:30]
wire [39:0] fdiv_resp_fflags_bits_uop_debug_pc; // @[execution-unit.scala:498:30]
wire [2:0] fdiv_resp_fflags_bits_uop_iq_type; // @[execution-unit.scala:498:30]
wire [9:0] fdiv_resp_fflags_bits_uop_fu_code; // @[execution-unit.scala:498:30]
wire [1:0] fdiv_resp_fflags_bits_uop_iw_state; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_is_br; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_is_jalr; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_is_jal; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_is_sfb; // @[execution-unit.scala:498:30]
wire [15:0] fdiv_resp_fflags_bits_uop_br_mask; // @[execution-unit.scala:498:30]
wire [3:0] fdiv_resp_fflags_bits_uop_br_tag; // @[execution-unit.scala:498:30]
wire [4:0] fdiv_resp_fflags_bits_uop_ftq_idx; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_edge_inst; // @[execution-unit.scala:498:30]
wire [5:0] fdiv_resp_fflags_bits_uop_pc_lob; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_taken; // @[execution-unit.scala:498:30]
wire [19:0] fdiv_resp_fflags_bits_uop_imm_packed; // @[execution-unit.scala:498:30]
wire [11:0] fdiv_resp_fflags_bits_uop_csr_addr; // @[execution-unit.scala:498:30]
wire [6:0] fdiv_resp_fflags_bits_uop_rob_idx; // @[execution-unit.scala:498:30]
wire [4:0] fdiv_resp_fflags_bits_uop_ldq_idx; // @[execution-unit.scala:498:30]
wire [4:0] fdiv_resp_fflags_bits_uop_stq_idx; // @[execution-unit.scala:498:30]
wire [1:0] fdiv_resp_fflags_bits_uop_rxq_idx; // @[execution-unit.scala:498:30]
wire [6:0] fdiv_resp_fflags_bits_uop_pdst; // @[execution-unit.scala:498:30]
wire [6:0] fdiv_resp_fflags_bits_uop_prs1; // @[execution-unit.scala:498:30]
wire [6:0] fdiv_resp_fflags_bits_uop_prs2; // @[execution-unit.scala:498:30]
wire [6:0] fdiv_resp_fflags_bits_uop_prs3; // @[execution-unit.scala:498:30]
wire [4:0] fdiv_resp_fflags_bits_uop_ppred; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_prs1_busy; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_prs2_busy; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_prs3_busy; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_ppred_busy; // @[execution-unit.scala:498:30]
wire [6:0] fdiv_resp_fflags_bits_uop_stale_pdst; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_exception; // @[execution-unit.scala:498:30]
wire [63:0] fdiv_resp_fflags_bits_uop_exc_cause; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_bypassable; // @[execution-unit.scala:498:30]
wire [4:0] fdiv_resp_fflags_bits_uop_mem_cmd; // @[execution-unit.scala:498:30]
wire [1:0] fdiv_resp_fflags_bits_uop_mem_size; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_mem_signed; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_is_fence; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_is_fencei; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_is_amo; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_uses_ldq; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_uses_stq; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_is_unique; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_flush_on_commit; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_ldst_is_rs1; // @[execution-unit.scala:498:30]
wire [5:0] fdiv_resp_fflags_bits_uop_ldst; // @[execution-unit.scala:498:30]
wire [5:0] fdiv_resp_fflags_bits_uop_lrs1; // @[execution-unit.scala:498:30]
wire [5:0] fdiv_resp_fflags_bits_uop_lrs2; // @[execution-unit.scala:498:30]
wire [5:0] fdiv_resp_fflags_bits_uop_lrs3; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_ldst_val; // @[execution-unit.scala:498:30]
wire [1:0] fdiv_resp_fflags_bits_uop_dst_rtype; // @[execution-unit.scala:498:30]
wire [1:0] fdiv_resp_fflags_bits_uop_lrs1_rtype; // @[execution-unit.scala:498:30]
wire [1:0] fdiv_resp_fflags_bits_uop_lrs2_rtype; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_frs3_en; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_fp_val; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_fp_single; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_xcpt_pf_if; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_xcpt_ae_if; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_xcpt_ma_if; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_bp_debug_if; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_bp_xcpt_if; // @[execution-unit.scala:498:30]
wire [1:0] fdiv_resp_fflags_bits_uop_debug_fsrc; // @[execution-unit.scala:498:30]
wire [1:0] fdiv_resp_fflags_bits_uop_debug_tsrc; // @[execution-unit.scala:498:30]
wire [4:0] fdiv_resp_fflags_bits_flags; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_valid; // @[execution-unit.scala:498:30]
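  // fdiv_busy: the fdiv/fsqrt pipe reports busy when the FDivSqrtUnit cannot accept
  // a new request, or when a valid incoming request targets it (fu_code bit 10'h80,
  // presumably FU_FDV in the Chisel source).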
wire [9:0] _fdiv_busy_T_1 = io_req_bits_uop_fu_code_0 & 10'h80; // @[execution-unit.scala:437:7]
wire _fdiv_busy_T = ~_FDivSqrtUnit_io_req_ready; // @[execution-unit.scala:502:22, :516:18]
wire _fdiv_busy_T_2 = |_fdiv_busy_T_1; // @[micro-op.scala:154:{40,47}]
wire _fdiv_busy_T_3 = io_req_valid_0 & _fdiv_busy_T_2; // @[execution-unit.scala:437:7, :516:58]
assign _fdiv_busy_T_4 = _fdiv_busy_T | _fdiv_busy_T_3; // @[execution-unit.scala:516:{18,41,58}]
assign fdiv_busy = _fdiv_busy_T_4; // @[execution-unit.scala:461:27, :516:41]
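  // io_fresp is valid when either the FPU or the FDivSqrt unit has a response, except
  // when the FPU response is an fp-to-int op (fu_code bit 10'h200, likely FU_F2I);
  // those results are steered to the separate fp-to-int queue further below instead.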
wire _io_fresp_valid_T = _FPUUnit_io_resp_valid | _FDivSqrtUnit_io_resp_valid; // @[execution-unit.scala:477:17, :502:22, :525:65]
wire [9:0] _GEN = _FPUUnit_io_resp_bits_uop_fu_code & 10'h200; // @[execution-unit.scala:477:17]
wire [9:0] _io_fresp_valid_T_1; // @[micro-op.scala:154:40]
assign _io_fresp_valid_T_1 = _GEN; // @[micro-op.scala:154:40]
wire [9:0] _queue_io_enq_valid_T; // @[micro-op.scala:154:40]
assign _queue_io_enq_valid_T = _GEN; // @[micro-op.scala:154:40]
wire _io_fresp_valid_T_2 = |_io_fresp_valid_T_1; // @[micro-op.scala:154:{40,47}]
wire _io_fresp_valid_T_3 = _FPUUnit_io_resp_valid & _io_fresp_valid_T_2; // @[execution-unit.scala:477:17, :526:47]
wire _io_fresp_valid_T_4 = ~_io_fresp_valid_T_3; // @[execution-unit.scala:526:{27,47}]
assign _io_fresp_valid_T_5 = _io_fresp_valid_T & _io_fresp_valid_T_4; // @[execution-unit.scala:525:{65,69}, :526:27]
assign io_fresp_valid_0 = _io_fresp_valid_T_5; // @[execution-unit.scala:437:7, :525:69]
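  // Mux the response micro-op between the two units: a valid FPU response takes
  // priority, otherwise the FDivSqrt response is forwarded.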
assign _io_fresp_bits_uop_T_uopc = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_uopc : _FDivSqrtUnit_io_resp_bits_uop_uopc; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_inst = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_inst : _FDivSqrtUnit_io_resp_bits_uop_inst; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_debug_inst = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_debug_inst : _FDivSqrtUnit_io_resp_bits_uop_debug_inst; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_is_rvc = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_is_rvc : _FDivSqrtUnit_io_resp_bits_uop_is_rvc; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_debug_pc = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_debug_pc : _FDivSqrtUnit_io_resp_bits_uop_debug_pc; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_iq_type = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_iq_type : _FDivSqrtUnit_io_resp_bits_uop_iq_type; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_fu_code = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_fu_code : _FDivSqrtUnit_io_resp_bits_uop_fu_code; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ctrl_br_type = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ctrl_br_type : _FDivSqrtUnit_io_resp_bits_uop_ctrl_br_type; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ctrl_op1_sel = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ctrl_op1_sel : _FDivSqrtUnit_io_resp_bits_uop_ctrl_op1_sel; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ctrl_op2_sel = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ctrl_op2_sel : _FDivSqrtUnit_io_resp_bits_uop_ctrl_op2_sel; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ctrl_imm_sel = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ctrl_imm_sel : _FDivSqrtUnit_io_resp_bits_uop_ctrl_imm_sel; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ctrl_op_fcn = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ctrl_op_fcn : _FDivSqrtUnit_io_resp_bits_uop_ctrl_op_fcn; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ctrl_fcn_dw = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ctrl_fcn_dw : _FDivSqrtUnit_io_resp_bits_uop_ctrl_fcn_dw; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ctrl_csr_cmd = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ctrl_csr_cmd : _FDivSqrtUnit_io_resp_bits_uop_ctrl_csr_cmd; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ctrl_is_load = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ctrl_is_load : _FDivSqrtUnit_io_resp_bits_uop_ctrl_is_load; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ctrl_is_sta = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ctrl_is_sta : _FDivSqrtUnit_io_resp_bits_uop_ctrl_is_sta; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ctrl_is_std = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ctrl_is_std : _FDivSqrtUnit_io_resp_bits_uop_ctrl_is_std; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_iw_state = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_iw_state : _FDivSqrtUnit_io_resp_bits_uop_iw_state; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_iw_p1_poisoned = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_iw_p1_poisoned : _FDivSqrtUnit_io_resp_bits_uop_iw_p1_poisoned; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_iw_p2_poisoned = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_iw_p2_poisoned : _FDivSqrtUnit_io_resp_bits_uop_iw_p2_poisoned; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_is_br = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_is_br : _FDivSqrtUnit_io_resp_bits_uop_is_br; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_is_jalr = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_is_jalr : _FDivSqrtUnit_io_resp_bits_uop_is_jalr; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_is_jal = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_is_jal : _FDivSqrtUnit_io_resp_bits_uop_is_jal; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_is_sfb = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_is_sfb : _FDivSqrtUnit_io_resp_bits_uop_is_sfb; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_br_mask = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_br_mask : _FDivSqrtUnit_io_resp_bits_uop_br_mask; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_br_tag = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_br_tag : _FDivSqrtUnit_io_resp_bits_uop_br_tag; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ftq_idx = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ftq_idx : _FDivSqrtUnit_io_resp_bits_uop_ftq_idx; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_edge_inst = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_edge_inst : _FDivSqrtUnit_io_resp_bits_uop_edge_inst; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_pc_lob = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_pc_lob : _FDivSqrtUnit_io_resp_bits_uop_pc_lob; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_taken = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_taken : _FDivSqrtUnit_io_resp_bits_uop_taken; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_imm_packed = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_imm_packed : _FDivSqrtUnit_io_resp_bits_uop_imm_packed; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_csr_addr = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_csr_addr : _FDivSqrtUnit_io_resp_bits_uop_csr_addr; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_rob_idx = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_rob_idx : _FDivSqrtUnit_io_resp_bits_uop_rob_idx; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ldq_idx = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ldq_idx : _FDivSqrtUnit_io_resp_bits_uop_ldq_idx; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_stq_idx = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_stq_idx : _FDivSqrtUnit_io_resp_bits_uop_stq_idx; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_rxq_idx = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_rxq_idx : _FDivSqrtUnit_io_resp_bits_uop_rxq_idx; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_pdst = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_pdst : _FDivSqrtUnit_io_resp_bits_uop_pdst; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_prs1 = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_prs1 : _FDivSqrtUnit_io_resp_bits_uop_prs1; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_prs2 = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_prs2 : _FDivSqrtUnit_io_resp_bits_uop_prs2; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_prs3 = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_prs3 : _FDivSqrtUnit_io_resp_bits_uop_prs3; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ppred = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ppred : _FDivSqrtUnit_io_resp_bits_uop_ppred; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_prs1_busy = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_prs1_busy : _FDivSqrtUnit_io_resp_bits_uop_prs1_busy; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_prs2_busy = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_prs2_busy : _FDivSqrtUnit_io_resp_bits_uop_prs2_busy; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_prs3_busy = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_prs3_busy : _FDivSqrtUnit_io_resp_bits_uop_prs3_busy; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ppred_busy = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ppred_busy : _FDivSqrtUnit_io_resp_bits_uop_ppred_busy; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_stale_pdst = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_stale_pdst : _FDivSqrtUnit_io_resp_bits_uop_stale_pdst; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_exception = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_exception : _FDivSqrtUnit_io_resp_bits_uop_exception; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_exc_cause = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_exc_cause : _FDivSqrtUnit_io_resp_bits_uop_exc_cause; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_bypassable = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_bypassable : _FDivSqrtUnit_io_resp_bits_uop_bypassable; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_mem_cmd = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_mem_cmd : _FDivSqrtUnit_io_resp_bits_uop_mem_cmd; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_mem_size = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_mem_size : _FDivSqrtUnit_io_resp_bits_uop_mem_size; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_mem_signed = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_mem_signed : _FDivSqrtUnit_io_resp_bits_uop_mem_signed; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_is_fence = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_is_fence : _FDivSqrtUnit_io_resp_bits_uop_is_fence; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_is_fencei = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_is_fencei : _FDivSqrtUnit_io_resp_bits_uop_is_fencei; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_is_amo = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_is_amo : _FDivSqrtUnit_io_resp_bits_uop_is_amo; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_uses_ldq = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_uses_ldq : _FDivSqrtUnit_io_resp_bits_uop_uses_ldq; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_uses_stq = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_uses_stq : _FDivSqrtUnit_io_resp_bits_uop_uses_stq; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_is_sys_pc2epc = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_is_sys_pc2epc : _FDivSqrtUnit_io_resp_bits_uop_is_sys_pc2epc; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_is_unique = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_is_unique : _FDivSqrtUnit_io_resp_bits_uop_is_unique; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_flush_on_commit = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_flush_on_commit : _FDivSqrtUnit_io_resp_bits_uop_flush_on_commit; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ldst_is_rs1 = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ldst_is_rs1 : _FDivSqrtUnit_io_resp_bits_uop_ldst_is_rs1; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ldst = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ldst : _FDivSqrtUnit_io_resp_bits_uop_ldst; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_lrs1 = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_lrs1 : _FDivSqrtUnit_io_resp_bits_uop_lrs1; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_lrs2 = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_lrs2 : _FDivSqrtUnit_io_resp_bits_uop_lrs2; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_lrs3 = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_lrs3 : _FDivSqrtUnit_io_resp_bits_uop_lrs3; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ldst_val = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ldst_val : _FDivSqrtUnit_io_resp_bits_uop_ldst_val; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_dst_rtype = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_dst_rtype : _FDivSqrtUnit_io_resp_bits_uop_dst_rtype; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_lrs1_rtype = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_lrs1_rtype : _FDivSqrtUnit_io_resp_bits_uop_lrs1_rtype; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_lrs2_rtype = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_lrs2_rtype : _FDivSqrtUnit_io_resp_bits_uop_lrs2_rtype; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_frs3_en = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_frs3_en : _FDivSqrtUnit_io_resp_bits_uop_frs3_en; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_fp_val = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_fp_val : _FDivSqrtUnit_io_resp_bits_uop_fp_val; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_fp_single = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_fp_single : _FDivSqrtUnit_io_resp_bits_uop_fp_single; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_xcpt_pf_if = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_xcpt_pf_if : _FDivSqrtUnit_io_resp_bits_uop_xcpt_pf_if; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_xcpt_ae_if = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_xcpt_ae_if : _FDivSqrtUnit_io_resp_bits_uop_xcpt_ae_if; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_xcpt_ma_if = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_xcpt_ma_if : _FDivSqrtUnit_io_resp_bits_uop_xcpt_ma_if; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_bp_debug_if = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_bp_debug_if : _FDivSqrtUnit_io_resp_bits_uop_bp_debug_if; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_bp_xcpt_if = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_bp_xcpt_if : _FDivSqrtUnit_io_resp_bits_uop_bp_xcpt_if; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_debug_fsrc = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_debug_fsrc : _FDivSqrtUnit_io_resp_bits_uop_debug_fsrc; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_debug_tsrc = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_debug_tsrc : _FDivSqrtUnit_io_resp_bits_uop_debug_tsrc; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_uopc_0 = _io_fresp_bits_uop_T_uopc; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_inst_0 = _io_fresp_bits_uop_T_inst; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_debug_inst_0 = _io_fresp_bits_uop_T_debug_inst; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_is_rvc_0 = _io_fresp_bits_uop_T_is_rvc; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_debug_pc_0 = _io_fresp_bits_uop_T_debug_pc; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_iq_type_0 = _io_fresp_bits_uop_T_iq_type; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_fu_code_0 = _io_fresp_bits_uop_T_fu_code; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ctrl_br_type_0 = _io_fresp_bits_uop_T_ctrl_br_type; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ctrl_op1_sel_0 = _io_fresp_bits_uop_T_ctrl_op1_sel; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ctrl_op2_sel_0 = _io_fresp_bits_uop_T_ctrl_op2_sel; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ctrl_imm_sel_0 = _io_fresp_bits_uop_T_ctrl_imm_sel; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ctrl_op_fcn_0 = _io_fresp_bits_uop_T_ctrl_op_fcn; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ctrl_fcn_dw_0 = _io_fresp_bits_uop_T_ctrl_fcn_dw; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ctrl_csr_cmd_0 = _io_fresp_bits_uop_T_ctrl_csr_cmd; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ctrl_is_load_0 = _io_fresp_bits_uop_T_ctrl_is_load; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ctrl_is_sta_0 = _io_fresp_bits_uop_T_ctrl_is_sta; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ctrl_is_std_0 = _io_fresp_bits_uop_T_ctrl_is_std; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_iw_state_0 = _io_fresp_bits_uop_T_iw_state; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_iw_p1_poisoned_0 = _io_fresp_bits_uop_T_iw_p1_poisoned; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_iw_p2_poisoned_0 = _io_fresp_bits_uop_T_iw_p2_poisoned; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_is_br_0 = _io_fresp_bits_uop_T_is_br; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_is_jalr_0 = _io_fresp_bits_uop_T_is_jalr; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_is_jal_0 = _io_fresp_bits_uop_T_is_jal; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_is_sfb_0 = _io_fresp_bits_uop_T_is_sfb; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_br_mask_0 = _io_fresp_bits_uop_T_br_mask; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_br_tag_0 = _io_fresp_bits_uop_T_br_tag; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ftq_idx_0 = _io_fresp_bits_uop_T_ftq_idx; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_edge_inst_0 = _io_fresp_bits_uop_T_edge_inst; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_pc_lob_0 = _io_fresp_bits_uop_T_pc_lob; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_taken_0 = _io_fresp_bits_uop_T_taken; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_imm_packed_0 = _io_fresp_bits_uop_T_imm_packed; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_csr_addr_0 = _io_fresp_bits_uop_T_csr_addr; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_rob_idx_0 = _io_fresp_bits_uop_T_rob_idx; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ldq_idx_0 = _io_fresp_bits_uop_T_ldq_idx; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_stq_idx_0 = _io_fresp_bits_uop_T_stq_idx; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_rxq_idx_0 = _io_fresp_bits_uop_T_rxq_idx; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_pdst_0 = _io_fresp_bits_uop_T_pdst; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_prs1_0 = _io_fresp_bits_uop_T_prs1; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_prs2_0 = _io_fresp_bits_uop_T_prs2; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_prs3_0 = _io_fresp_bits_uop_T_prs3; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ppred_0 = _io_fresp_bits_uop_T_ppred; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_prs1_busy_0 = _io_fresp_bits_uop_T_prs1_busy; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_prs2_busy_0 = _io_fresp_bits_uop_T_prs2_busy; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_prs3_busy_0 = _io_fresp_bits_uop_T_prs3_busy; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ppred_busy_0 = _io_fresp_bits_uop_T_ppred_busy; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_stale_pdst_0 = _io_fresp_bits_uop_T_stale_pdst; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_exception_0 = _io_fresp_bits_uop_T_exception; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_exc_cause_0 = _io_fresp_bits_uop_T_exc_cause; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_bypassable_0 = _io_fresp_bits_uop_T_bypassable; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_mem_cmd_0 = _io_fresp_bits_uop_T_mem_cmd; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_mem_size_0 = _io_fresp_bits_uop_T_mem_size; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_mem_signed_0 = _io_fresp_bits_uop_T_mem_signed; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_is_fence_0 = _io_fresp_bits_uop_T_is_fence; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_is_fencei_0 = _io_fresp_bits_uop_T_is_fencei; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_is_amo_0 = _io_fresp_bits_uop_T_is_amo; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_uses_ldq_0 = _io_fresp_bits_uop_T_uses_ldq; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_uses_stq_0 = _io_fresp_bits_uop_T_uses_stq; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_is_sys_pc2epc_0 = _io_fresp_bits_uop_T_is_sys_pc2epc; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_is_unique_0 = _io_fresp_bits_uop_T_is_unique; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_flush_on_commit_0 = _io_fresp_bits_uop_T_flush_on_commit; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ldst_is_rs1_0 = _io_fresp_bits_uop_T_ldst_is_rs1; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ldst_0 = _io_fresp_bits_uop_T_ldst; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_lrs1_0 = _io_fresp_bits_uop_T_lrs1; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_lrs2_0 = _io_fresp_bits_uop_T_lrs2; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_lrs3_0 = _io_fresp_bits_uop_T_lrs3; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ldst_val_0 = _io_fresp_bits_uop_T_ldst_val; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_dst_rtype_0 = _io_fresp_bits_uop_T_dst_rtype; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_lrs1_rtype_0 = _io_fresp_bits_uop_T_lrs1_rtype; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_lrs2_rtype_0 = _io_fresp_bits_uop_T_lrs2_rtype; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_frs3_en_0 = _io_fresp_bits_uop_T_frs3_en; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_fp_val_0 = _io_fresp_bits_uop_T_fp_val; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_fp_single_0 = _io_fresp_bits_uop_T_fp_single; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_xcpt_pf_if_0 = _io_fresp_bits_uop_T_xcpt_pf_if; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_xcpt_ae_if_0 = _io_fresp_bits_uop_T_xcpt_ae_if; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_xcpt_ma_if_0 = _io_fresp_bits_uop_T_xcpt_ma_if; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_bp_debug_if_0 = _io_fresp_bits_uop_T_bp_debug_if; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_bp_xcpt_if_0 = _io_fresp_bits_uop_T_bp_xcpt_if; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_debug_fsrc_0 = _io_fresp_bits_uop_T_debug_fsrc; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_debug_tsrc_0 = _io_fresp_bits_uop_T_debug_tsrc; // @[Mux.scala:50:70]
assign _io_fresp_bits_data_T = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_data : _FDivSqrtUnit_io_resp_bits_data; // @[Mux.scala:50:70]
assign io_fresp_bits_data_0 = _io_fresp_bits_data_T; // @[Mux.scala:50:70]
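  // Select the fflags bundle the same way, keyed off fpu_resp_val
  // (execution-unit.scala:473): FPU fflags when set, fdiv/fsqrt fflags otherwise.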
assign _io_fresp_bits_fflags_T_valid = fpu_resp_val ? fpu_resp_fflags_valid : fdiv_resp_fflags_valid; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_uopc = fpu_resp_val ? fpu_resp_fflags_bits_uop_uopc : fdiv_resp_fflags_bits_uop_uopc; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_inst = fpu_resp_val ? fpu_resp_fflags_bits_uop_inst : fdiv_resp_fflags_bits_uop_inst; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_debug_inst = fpu_resp_val ? fpu_resp_fflags_bits_uop_debug_inst : fdiv_resp_fflags_bits_uop_debug_inst; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_is_rvc = fpu_resp_val ? fpu_resp_fflags_bits_uop_is_rvc : fdiv_resp_fflags_bits_uop_is_rvc; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_debug_pc = fpu_resp_val ? fpu_resp_fflags_bits_uop_debug_pc : fdiv_resp_fflags_bits_uop_debug_pc; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_iq_type = fpu_resp_val ? fpu_resp_fflags_bits_uop_iq_type : fdiv_resp_fflags_bits_uop_iq_type; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_fu_code = fpu_resp_val ? fpu_resp_fflags_bits_uop_fu_code : fdiv_resp_fflags_bits_uop_fu_code; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ctrl_br_type = fpu_resp_val ? fpu_resp_fflags_bits_uop_ctrl_br_type : fdiv_resp_fflags_bits_uop_ctrl_br_type; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ctrl_op1_sel = fpu_resp_val ? fpu_resp_fflags_bits_uop_ctrl_op1_sel : fdiv_resp_fflags_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ctrl_op2_sel = fpu_resp_val ? fpu_resp_fflags_bits_uop_ctrl_op2_sel : fdiv_resp_fflags_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ctrl_imm_sel = fpu_resp_val ? fpu_resp_fflags_bits_uop_ctrl_imm_sel : fdiv_resp_fflags_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ctrl_op_fcn = fpu_resp_val ? fpu_resp_fflags_bits_uop_ctrl_op_fcn : fdiv_resp_fflags_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ctrl_fcn_dw = fpu_resp_val ? fpu_resp_fflags_bits_uop_ctrl_fcn_dw : fdiv_resp_fflags_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ctrl_csr_cmd = fpu_resp_val ? fpu_resp_fflags_bits_uop_ctrl_csr_cmd : fdiv_resp_fflags_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ctrl_is_load = fpu_resp_val ? fpu_resp_fflags_bits_uop_ctrl_is_load : fdiv_resp_fflags_bits_uop_ctrl_is_load; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ctrl_is_sta = fpu_resp_val ? fpu_resp_fflags_bits_uop_ctrl_is_sta : fdiv_resp_fflags_bits_uop_ctrl_is_sta; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ctrl_is_std = fpu_resp_val ? fpu_resp_fflags_bits_uop_ctrl_is_std : fdiv_resp_fflags_bits_uop_ctrl_is_std; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_iw_state = fpu_resp_val ? fpu_resp_fflags_bits_uop_iw_state : fdiv_resp_fflags_bits_uop_iw_state; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_iw_p1_poisoned = fpu_resp_val ? fpu_resp_fflags_bits_uop_iw_p1_poisoned : fdiv_resp_fflags_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_iw_p2_poisoned = fpu_resp_val ? fpu_resp_fflags_bits_uop_iw_p2_poisoned : fdiv_resp_fflags_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_is_br = fpu_resp_val ? fpu_resp_fflags_bits_uop_is_br : fdiv_resp_fflags_bits_uop_is_br; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_is_jalr = fpu_resp_val ? fpu_resp_fflags_bits_uop_is_jalr : fdiv_resp_fflags_bits_uop_is_jalr; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_is_jal = fpu_resp_val ? fpu_resp_fflags_bits_uop_is_jal : fdiv_resp_fflags_bits_uop_is_jal; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_is_sfb = fpu_resp_val ? fpu_resp_fflags_bits_uop_is_sfb : fdiv_resp_fflags_bits_uop_is_sfb; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_br_mask = fpu_resp_val ? fpu_resp_fflags_bits_uop_br_mask : fdiv_resp_fflags_bits_uop_br_mask; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_br_tag = fpu_resp_val ? fpu_resp_fflags_bits_uop_br_tag : fdiv_resp_fflags_bits_uop_br_tag; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ftq_idx = fpu_resp_val ? fpu_resp_fflags_bits_uop_ftq_idx : fdiv_resp_fflags_bits_uop_ftq_idx; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_edge_inst = fpu_resp_val ? fpu_resp_fflags_bits_uop_edge_inst : fdiv_resp_fflags_bits_uop_edge_inst; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_pc_lob = fpu_resp_val ? fpu_resp_fflags_bits_uop_pc_lob : fdiv_resp_fflags_bits_uop_pc_lob; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_taken = fpu_resp_val ? fpu_resp_fflags_bits_uop_taken : fdiv_resp_fflags_bits_uop_taken; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_imm_packed = fpu_resp_val ? fpu_resp_fflags_bits_uop_imm_packed : fdiv_resp_fflags_bits_uop_imm_packed; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_csr_addr = fpu_resp_val ? fpu_resp_fflags_bits_uop_csr_addr : fdiv_resp_fflags_bits_uop_csr_addr; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_rob_idx = fpu_resp_val ? fpu_resp_fflags_bits_uop_rob_idx : fdiv_resp_fflags_bits_uop_rob_idx; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ldq_idx = fpu_resp_val ? fpu_resp_fflags_bits_uop_ldq_idx : fdiv_resp_fflags_bits_uop_ldq_idx; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_stq_idx = fpu_resp_val ? fpu_resp_fflags_bits_uop_stq_idx : fdiv_resp_fflags_bits_uop_stq_idx; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_rxq_idx = fpu_resp_val ? fpu_resp_fflags_bits_uop_rxq_idx : fdiv_resp_fflags_bits_uop_rxq_idx; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_pdst = fpu_resp_val ? fpu_resp_fflags_bits_uop_pdst : fdiv_resp_fflags_bits_uop_pdst; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_prs1 = fpu_resp_val ? fpu_resp_fflags_bits_uop_prs1 : fdiv_resp_fflags_bits_uop_prs1; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_prs2 = fpu_resp_val ? fpu_resp_fflags_bits_uop_prs2 : fdiv_resp_fflags_bits_uop_prs2; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_prs3 = fpu_resp_val ? fpu_resp_fflags_bits_uop_prs3 : fdiv_resp_fflags_bits_uop_prs3; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ppred = fpu_resp_val ? fpu_resp_fflags_bits_uop_ppred : fdiv_resp_fflags_bits_uop_ppred; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_prs1_busy = fpu_resp_val ? fpu_resp_fflags_bits_uop_prs1_busy : fdiv_resp_fflags_bits_uop_prs1_busy; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_prs2_busy = fpu_resp_val ? fpu_resp_fflags_bits_uop_prs2_busy : fdiv_resp_fflags_bits_uop_prs2_busy; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_prs3_busy = fpu_resp_val ? fpu_resp_fflags_bits_uop_prs3_busy : fdiv_resp_fflags_bits_uop_prs3_busy; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ppred_busy = fpu_resp_val ? fpu_resp_fflags_bits_uop_ppred_busy : fdiv_resp_fflags_bits_uop_ppred_busy; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_stale_pdst = fpu_resp_val ? fpu_resp_fflags_bits_uop_stale_pdst : fdiv_resp_fflags_bits_uop_stale_pdst; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_exception = fpu_resp_val ? fpu_resp_fflags_bits_uop_exception : fdiv_resp_fflags_bits_uop_exception; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_exc_cause = fpu_resp_val ? fpu_resp_fflags_bits_uop_exc_cause : fdiv_resp_fflags_bits_uop_exc_cause; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_bypassable = fpu_resp_val ? fpu_resp_fflags_bits_uop_bypassable : fdiv_resp_fflags_bits_uop_bypassable; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_mem_cmd = fpu_resp_val ? fpu_resp_fflags_bits_uop_mem_cmd : fdiv_resp_fflags_bits_uop_mem_cmd; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_mem_size = fpu_resp_val ? fpu_resp_fflags_bits_uop_mem_size : fdiv_resp_fflags_bits_uop_mem_size; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_mem_signed = fpu_resp_val ? fpu_resp_fflags_bits_uop_mem_signed : fdiv_resp_fflags_bits_uop_mem_signed; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_is_fence = fpu_resp_val ? fpu_resp_fflags_bits_uop_is_fence : fdiv_resp_fflags_bits_uop_is_fence; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_is_fencei = fpu_resp_val ? fpu_resp_fflags_bits_uop_is_fencei : fdiv_resp_fflags_bits_uop_is_fencei; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_is_amo = fpu_resp_val ? fpu_resp_fflags_bits_uop_is_amo : fdiv_resp_fflags_bits_uop_is_amo; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_uses_ldq = fpu_resp_val ? fpu_resp_fflags_bits_uop_uses_ldq : fdiv_resp_fflags_bits_uop_uses_ldq; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_uses_stq = fpu_resp_val ? fpu_resp_fflags_bits_uop_uses_stq : fdiv_resp_fflags_bits_uop_uses_stq; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_is_sys_pc2epc = fpu_resp_val ? fpu_resp_fflags_bits_uop_is_sys_pc2epc : fdiv_resp_fflags_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_is_unique = fpu_resp_val ? fpu_resp_fflags_bits_uop_is_unique : fdiv_resp_fflags_bits_uop_is_unique; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_flush_on_commit = fpu_resp_val ? fpu_resp_fflags_bits_uop_flush_on_commit : fdiv_resp_fflags_bits_uop_flush_on_commit; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ldst_is_rs1 = fpu_resp_val ? fpu_resp_fflags_bits_uop_ldst_is_rs1 : fdiv_resp_fflags_bits_uop_ldst_is_rs1; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ldst = fpu_resp_val ? fpu_resp_fflags_bits_uop_ldst : fdiv_resp_fflags_bits_uop_ldst; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_lrs1 = fpu_resp_val ? fpu_resp_fflags_bits_uop_lrs1 : fdiv_resp_fflags_bits_uop_lrs1; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_lrs2 = fpu_resp_val ? fpu_resp_fflags_bits_uop_lrs2 : fdiv_resp_fflags_bits_uop_lrs2; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_lrs3 = fpu_resp_val ? fpu_resp_fflags_bits_uop_lrs3 : fdiv_resp_fflags_bits_uop_lrs3; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ldst_val = fpu_resp_val ? fpu_resp_fflags_bits_uop_ldst_val : fdiv_resp_fflags_bits_uop_ldst_val; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_dst_rtype = fpu_resp_val ? fpu_resp_fflags_bits_uop_dst_rtype : fdiv_resp_fflags_bits_uop_dst_rtype; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_lrs1_rtype = fpu_resp_val ? fpu_resp_fflags_bits_uop_lrs1_rtype : fdiv_resp_fflags_bits_uop_lrs1_rtype; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_lrs2_rtype = fpu_resp_val ? fpu_resp_fflags_bits_uop_lrs2_rtype : fdiv_resp_fflags_bits_uop_lrs2_rtype; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_frs3_en = fpu_resp_val ? fpu_resp_fflags_bits_uop_frs3_en : fdiv_resp_fflags_bits_uop_frs3_en; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_fp_val = fpu_resp_val ? fpu_resp_fflags_bits_uop_fp_val : fdiv_resp_fflags_bits_uop_fp_val; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_fp_single = fpu_resp_val ? fpu_resp_fflags_bits_uop_fp_single : fdiv_resp_fflags_bits_uop_fp_single; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_xcpt_pf_if = fpu_resp_val ? fpu_resp_fflags_bits_uop_xcpt_pf_if : fdiv_resp_fflags_bits_uop_xcpt_pf_if; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_xcpt_ae_if = fpu_resp_val ? fpu_resp_fflags_bits_uop_xcpt_ae_if : fdiv_resp_fflags_bits_uop_xcpt_ae_if; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_xcpt_ma_if = fpu_resp_val ? fpu_resp_fflags_bits_uop_xcpt_ma_if : fdiv_resp_fflags_bits_uop_xcpt_ma_if; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_bp_debug_if = fpu_resp_val ? fpu_resp_fflags_bits_uop_bp_debug_if : fdiv_resp_fflags_bits_uop_bp_debug_if; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_bp_xcpt_if = fpu_resp_val ? fpu_resp_fflags_bits_uop_bp_xcpt_if : fdiv_resp_fflags_bits_uop_bp_xcpt_if; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_debug_fsrc = fpu_resp_val ? fpu_resp_fflags_bits_uop_debug_fsrc : fdiv_resp_fflags_bits_uop_debug_fsrc; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_debug_tsrc = fpu_resp_val ? fpu_resp_fflags_bits_uop_debug_tsrc : fdiv_resp_fflags_bits_uop_debug_tsrc; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_flags = fpu_resp_val ? fpu_resp_fflags_bits_flags : fdiv_resp_fflags_bits_flags; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign io_fresp_bits_fflags_valid_0 = _io_fresp_bits_fflags_T_valid; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_uopc_0 = _io_fresp_bits_fflags_T_bits_uop_uopc; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_inst_0 = _io_fresp_bits_fflags_T_bits_uop_inst; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_debug_inst_0 = _io_fresp_bits_fflags_T_bits_uop_debug_inst; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_is_rvc_0 = _io_fresp_bits_fflags_T_bits_uop_is_rvc; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_debug_pc_0 = _io_fresp_bits_fflags_T_bits_uop_debug_pc; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_iq_type_0 = _io_fresp_bits_fflags_T_bits_uop_iq_type; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_fu_code_0 = _io_fresp_bits_fflags_T_bits_uop_fu_code; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ctrl_br_type_0 = _io_fresp_bits_fflags_T_bits_uop_ctrl_br_type; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ctrl_op1_sel_0 = _io_fresp_bits_fflags_T_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ctrl_op2_sel_0 = _io_fresp_bits_fflags_T_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ctrl_imm_sel_0 = _io_fresp_bits_fflags_T_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ctrl_op_fcn_0 = _io_fresp_bits_fflags_T_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ctrl_fcn_dw_0 = _io_fresp_bits_fflags_T_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ctrl_csr_cmd_0 = _io_fresp_bits_fflags_T_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ctrl_is_load_0 = _io_fresp_bits_fflags_T_bits_uop_ctrl_is_load; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ctrl_is_sta_0 = _io_fresp_bits_fflags_T_bits_uop_ctrl_is_sta; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ctrl_is_std_0 = _io_fresp_bits_fflags_T_bits_uop_ctrl_is_std; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_iw_state_0 = _io_fresp_bits_fflags_T_bits_uop_iw_state; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_iw_p1_poisoned_0 = _io_fresp_bits_fflags_T_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_iw_p2_poisoned_0 = _io_fresp_bits_fflags_T_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_is_br_0 = _io_fresp_bits_fflags_T_bits_uop_is_br; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_is_jalr_0 = _io_fresp_bits_fflags_T_bits_uop_is_jalr; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_is_jal_0 = _io_fresp_bits_fflags_T_bits_uop_is_jal; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_is_sfb_0 = _io_fresp_bits_fflags_T_bits_uop_is_sfb; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_br_mask_0 = _io_fresp_bits_fflags_T_bits_uop_br_mask; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_br_tag_0 = _io_fresp_bits_fflags_T_bits_uop_br_tag; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ftq_idx_0 = _io_fresp_bits_fflags_T_bits_uop_ftq_idx; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_edge_inst_0 = _io_fresp_bits_fflags_T_bits_uop_edge_inst; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_pc_lob_0 = _io_fresp_bits_fflags_T_bits_uop_pc_lob; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_taken_0 = _io_fresp_bits_fflags_T_bits_uop_taken; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_imm_packed_0 = _io_fresp_bits_fflags_T_bits_uop_imm_packed; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_csr_addr_0 = _io_fresp_bits_fflags_T_bits_uop_csr_addr; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_rob_idx_0 = _io_fresp_bits_fflags_T_bits_uop_rob_idx; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ldq_idx_0 = _io_fresp_bits_fflags_T_bits_uop_ldq_idx; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_stq_idx_0 = _io_fresp_bits_fflags_T_bits_uop_stq_idx; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_rxq_idx_0 = _io_fresp_bits_fflags_T_bits_uop_rxq_idx; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_pdst_0 = _io_fresp_bits_fflags_T_bits_uop_pdst; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_prs1_0 = _io_fresp_bits_fflags_T_bits_uop_prs1; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_prs2_0 = _io_fresp_bits_fflags_T_bits_uop_prs2; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_prs3_0 = _io_fresp_bits_fflags_T_bits_uop_prs3; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ppred_0 = _io_fresp_bits_fflags_T_bits_uop_ppred; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_prs1_busy_0 = _io_fresp_bits_fflags_T_bits_uop_prs1_busy; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_prs2_busy_0 = _io_fresp_bits_fflags_T_bits_uop_prs2_busy; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_prs3_busy_0 = _io_fresp_bits_fflags_T_bits_uop_prs3_busy; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ppred_busy_0 = _io_fresp_bits_fflags_T_bits_uop_ppred_busy; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_stale_pdst_0 = _io_fresp_bits_fflags_T_bits_uop_stale_pdst; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_exception_0 = _io_fresp_bits_fflags_T_bits_uop_exception; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_exc_cause_0 = _io_fresp_bits_fflags_T_bits_uop_exc_cause; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_bypassable_0 = _io_fresp_bits_fflags_T_bits_uop_bypassable; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_mem_cmd_0 = _io_fresp_bits_fflags_T_bits_uop_mem_cmd; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_mem_size_0 = _io_fresp_bits_fflags_T_bits_uop_mem_size; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_mem_signed_0 = _io_fresp_bits_fflags_T_bits_uop_mem_signed; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_is_fence_0 = _io_fresp_bits_fflags_T_bits_uop_is_fence; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_is_fencei_0 = _io_fresp_bits_fflags_T_bits_uop_is_fencei; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_is_amo_0 = _io_fresp_bits_fflags_T_bits_uop_is_amo; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_uses_ldq_0 = _io_fresp_bits_fflags_T_bits_uop_uses_ldq; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_uses_stq_0 = _io_fresp_bits_fflags_T_bits_uop_uses_stq; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_is_sys_pc2epc_0 = _io_fresp_bits_fflags_T_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_is_unique_0 = _io_fresp_bits_fflags_T_bits_uop_is_unique; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_flush_on_commit_0 = _io_fresp_bits_fflags_T_bits_uop_flush_on_commit; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ldst_is_rs1_0 = _io_fresp_bits_fflags_T_bits_uop_ldst_is_rs1; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ldst_0 = _io_fresp_bits_fflags_T_bits_uop_ldst; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_lrs1_0 = _io_fresp_bits_fflags_T_bits_uop_lrs1; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_lrs2_0 = _io_fresp_bits_fflags_T_bits_uop_lrs2; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_lrs3_0 = _io_fresp_bits_fflags_T_bits_uop_lrs3; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ldst_val_0 = _io_fresp_bits_fflags_T_bits_uop_ldst_val; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_dst_rtype_0 = _io_fresp_bits_fflags_T_bits_uop_dst_rtype; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_lrs1_rtype_0 = _io_fresp_bits_fflags_T_bits_uop_lrs1_rtype; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_lrs2_rtype_0 = _io_fresp_bits_fflags_T_bits_uop_lrs2_rtype; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_frs3_en_0 = _io_fresp_bits_fflags_T_bits_uop_frs3_en; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_fp_val_0 = _io_fresp_bits_fflags_T_bits_uop_fp_val; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_fp_single_0 = _io_fresp_bits_fflags_T_bits_uop_fp_single; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_xcpt_pf_if_0 = _io_fresp_bits_fflags_T_bits_uop_xcpt_pf_if; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_xcpt_ae_if_0 = _io_fresp_bits_fflags_T_bits_uop_xcpt_ae_if; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_xcpt_ma_if_0 = _io_fresp_bits_fflags_T_bits_uop_xcpt_ma_if; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_bp_debug_if_0 = _io_fresp_bits_fflags_T_bits_uop_bp_debug_if; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_bp_xcpt_if_0 = _io_fresp_bits_fflags_T_bits_uop_bp_xcpt_if; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_debug_fsrc_0 = _io_fresp_bits_fflags_T_bits_uop_debug_fsrc; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_debug_tsrc_0 = _io_fresp_bits_fflags_T_bits_uop_debug_tsrc; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_flags_0 = _io_fresp_bits_fflags_T_bits_flags; // @[execution-unit.scala:437:7, :530:30]
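  // Enqueue FPU responses into the fp-to-int queue when the fu_code 10'h200 bit is
  // set, excluding uopc 7'h2 (presumably uopSTA, which is handled by fp_sdq instead).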
wire _queue_io_enq_valid_T_1 = |_queue_io_enq_valid_T; // @[micro-op.scala:154:{40,47}]
wire _queue_io_enq_valid_T_2 = _FPUUnit_io_resp_valid & _queue_io_enq_valid_T_1; // @[execution-unit.scala:477:17, :539:52]
wire _queue_io_enq_valid_T_3 = _FPUUnit_io_resp_bits_uop_uopc != 7'h2; // @[execution-unit.scala:477:17, :541:60]
wire _queue_io_enq_valid_T_4 = _queue_io_enq_valid_T_2 & _queue_io_enq_valid_T_3; // @[execution-unit.scala:539:52, :540:74, :541:60]
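  // fp_sdq enqueues floating-point store data: a valid request with uopc 7'h2
  // (presumably uopSTA) that is not being killed by a branch mispredict
  // (br_mask AND brupdate.b1.mispredict_mask must be zero).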
wire _fp_sdq_io_enq_valid_T = io_req_bits_uop_uopc_0 == 7'h2; // @[execution-unit.scala:437:7, :553:70]
wire _fp_sdq_io_enq_valid_T_1 = io_req_valid_0 & _fp_sdq_io_enq_valid_T; // @[execution-unit.scala:437:7, :553:{46,70}]
wire [15:0] _fp_sdq_io_enq_valid_T_2 = io_brupdate_b1_mispredict_mask_0 & io_req_bits_uop_br_mask_0; // @[util.scala:118:51]
wire _fp_sdq_io_enq_valid_T_3 = |_fp_sdq_io_enq_valid_T_2; // @[util.scala:118:{51,59}]
wire _fp_sdq_io_enq_valid_T_4 = ~_fp_sdq_io_enq_valid_T_3; // @[util.scala:118:59]
wire _fp_sdq_io_enq_valid_T_5 = _fp_sdq_io_enq_valid_T_1 & _fp_sdq_io_enq_valid_T_4; // @[execution-unit.scala:553:{46,81,84}]
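  // Convert the 65-bit recoded FP register value (rs2_data) back to standard
  // IEEE-754 double format for the store-data queue, via the
  // rawFloatFromRecFN / fNFromRecFN decomposition below.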
wire [11:0] fp_sdq_io_enq_bits_data_unrecoded_rawIn_exp = io_req_bits_rs2_data_0[63:52]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _fp_sdq_io_enq_bits_data_unrecoded_rawIn_isZero_T = fp_sdq_io_enq_bits_data_unrecoded_rawIn_exp[11:9]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire fp_sdq_io_enq_bits_data_unrecoded_rawIn_isZero = _fp_sdq_io_enq_bits_data_unrecoded_rawIn_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
wire fp_sdq_io_enq_bits_data_unrecoded_rawIn_isZero_0 = fp_sdq_io_enq_bits_data_unrecoded_rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _fp_sdq_io_enq_bits_data_unrecoded_rawIn_isSpecial_T = fp_sdq_io_enq_bits_data_unrecoded_rawIn_exp[11:10]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire fp_sdq_io_enq_bits_data_unrecoded_rawIn_isSpecial = &_fp_sdq_io_enq_bits_data_unrecoded_rawIn_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
wire _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [12:0] _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [53:0] _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire fp_sdq_io_enq_bits_data_unrecoded_rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire fp_sdq_io_enq_bits_data_unrecoded_rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire fp_sdq_io_enq_bits_data_unrecoded_rawIn_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [12:0] fp_sdq_io_enq_bits_data_unrecoded_rawIn_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [53:0] fp_sdq_io_enq_bits_data_unrecoded_rawIn_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isNaN_T = fp_sdq_io_enq_bits_data_unrecoded_rawIn_exp[9]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isInf_T = fp_sdq_io_enq_bits_data_unrecoded_rawIn_exp[9]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isNaN_T_1 = fp_sdq_io_enq_bits_data_unrecoded_rawIn_isSpecial & _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign fp_sdq_io_enq_bits_data_unrecoded_rawIn_isNaN = _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isInf_T_1 = ~_fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isInf_T_2 = fp_sdq_io_enq_bits_data_unrecoded_rawIn_isSpecial & _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign fp_sdq_io_enq_bits_data_unrecoded_rawIn_isInf = _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sign_T = io_req_bits_rs2_data_0[64]; // @[rawFloatFromRecFN.scala:59:25]
assign fp_sdq_io_enq_bits_data_unrecoded_rawIn_sign = _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sExp_T = {1'h0, fp_sdq_io_enq_bits_data_unrecoded_rawIn_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign fp_sdq_io_enq_bits_data_unrecoded_rawIn_sExp = _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sig_T = ~fp_sdq_io_enq_bits_data_unrecoded_rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sig_T_1 = {1'h0, _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [51:0] _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sig_T_2 = io_req_bits_rs2_data_0[51:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sig_T_3 = {_fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sig_T_1, _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign fp_sdq_io_enq_bits_data_unrecoded_rawIn_sig = _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
wire fp_sdq_io_enq_bits_data_unrecoded_isSubnormal = $signed(fp_sdq_io_enq_bits_data_unrecoded_rawIn_sExp) < 13'sh402; // @[rawFloatFromRecFN.scala:55:23]
wire [5:0] _fp_sdq_io_enq_bits_data_unrecoded_denormShiftDist_T = fp_sdq_io_enq_bits_data_unrecoded_rawIn_sExp[5:0]; // @[rawFloatFromRecFN.scala:55:23]
wire [6:0] _fp_sdq_io_enq_bits_data_unrecoded_denormShiftDist_T_1 = 7'h1 - {1'h0, _fp_sdq_io_enq_bits_data_unrecoded_denormShiftDist_T}; // @[fNFromRecFN.scala:52:{35,47}]
wire [5:0] fp_sdq_io_enq_bits_data_unrecoded_denormShiftDist = _fp_sdq_io_enq_bits_data_unrecoded_denormShiftDist_T_1[5:0]; // @[fNFromRecFN.scala:52:35]
wire [52:0] _fp_sdq_io_enq_bits_data_unrecoded_denormFract_T = fp_sdq_io_enq_bits_data_unrecoded_rawIn_sig[53:1]; // @[rawFloatFromRecFN.scala:55:23]
wire [52:0] _fp_sdq_io_enq_bits_data_unrecoded_denormFract_T_1 = _fp_sdq_io_enq_bits_data_unrecoded_denormFract_T >> fp_sdq_io_enq_bits_data_unrecoded_denormShiftDist; // @[fNFromRecFN.scala:52:35, :53:{38,42}]
wire [51:0] fp_sdq_io_enq_bits_data_unrecoded_denormFract = _fp_sdq_io_enq_bits_data_unrecoded_denormFract_T_1[51:0]; // @[fNFromRecFN.scala:53:{42,60}]
wire [10:0] _fp_sdq_io_enq_bits_data_unrecoded_expOut_T = fp_sdq_io_enq_bits_data_unrecoded_rawIn_sExp[10:0]; // @[rawFloatFromRecFN.scala:55:23]
wire [11:0] _fp_sdq_io_enq_bits_data_unrecoded_expOut_T_1 = {1'h0, _fp_sdq_io_enq_bits_data_unrecoded_expOut_T} - 12'h401; // @[fNFromRecFN.scala:58:{27,45}]
wire [10:0] _fp_sdq_io_enq_bits_data_unrecoded_expOut_T_2 = _fp_sdq_io_enq_bits_data_unrecoded_expOut_T_1[10:0]; // @[fNFromRecFN.scala:58:45]
wire [10:0] _fp_sdq_io_enq_bits_data_unrecoded_expOut_T_3 = fp_sdq_io_enq_bits_data_unrecoded_isSubnormal ? 11'h0 : _fp_sdq_io_enq_bits_data_unrecoded_expOut_T_2; // @[fNFromRecFN.scala:51:38, :56:16, :58:45]
wire _fp_sdq_io_enq_bits_data_unrecoded_expOut_T_4 = fp_sdq_io_enq_bits_data_unrecoded_rawIn_isNaN | fp_sdq_io_enq_bits_data_unrecoded_rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire [10:0] _fp_sdq_io_enq_bits_data_unrecoded_expOut_T_5 = {11{_fp_sdq_io_enq_bits_data_unrecoded_expOut_T_4}}; // @[fNFromRecFN.scala:60:{21,44}]
wire [10:0] fp_sdq_io_enq_bits_data_unrecoded_expOut = _fp_sdq_io_enq_bits_data_unrecoded_expOut_T_3 | _fp_sdq_io_enq_bits_data_unrecoded_expOut_T_5; // @[fNFromRecFN.scala:56:16, :60:{15,21}]
wire [51:0] _fp_sdq_io_enq_bits_data_unrecoded_fractOut_T = fp_sdq_io_enq_bits_data_unrecoded_rawIn_sig[51:0]; // @[rawFloatFromRecFN.scala:55:23]
wire [51:0] _fp_sdq_io_enq_bits_data_unrecoded_fractOut_T_1 = fp_sdq_io_enq_bits_data_unrecoded_rawIn_isInf ? 52'h0 : _fp_sdq_io_enq_bits_data_unrecoded_fractOut_T; // @[rawFloatFromRecFN.scala:55:23]
wire [51:0] fp_sdq_io_enq_bits_data_unrecoded_fractOut = fp_sdq_io_enq_bits_data_unrecoded_isSubnormal ? fp_sdq_io_enq_bits_data_unrecoded_denormFract : _fp_sdq_io_enq_bits_data_unrecoded_fractOut_T_1; // @[fNFromRecFN.scala:51:38, :53:60, :62:16, :64:20]
wire [11:0] fp_sdq_io_enq_bits_data_unrecoded_hi = {fp_sdq_io_enq_bits_data_unrecoded_rawIn_sign, fp_sdq_io_enq_bits_data_unrecoded_expOut}; // @[rawFloatFromRecFN.scala:55:23]
wire [63:0] fp_sdq_io_enq_bits_data_unrecoded = {fp_sdq_io_enq_bits_data_unrecoded_hi, fp_sdq_io_enq_bits_data_unrecoded_fractOut}; // @[fNFromRecFN.scala:62:16, :66:12]
wire _fp_sdq_io_enq_bits_data_prevRecoded_T = io_req_bits_rs2_data_0[31]; // @[FPU.scala:442:10]
wire _fp_sdq_io_enq_bits_data_prevRecoded_T_1 = io_req_bits_rs2_data_0[52]; // @[FPU.scala:443:10]
wire [30:0] _fp_sdq_io_enq_bits_data_prevRecoded_T_2 = io_req_bits_rs2_data_0[30:0]; // @[FPU.scala:444:10]
wire [1:0] fp_sdq_io_enq_bits_data_prevRecoded_hi = {_fp_sdq_io_enq_bits_data_prevRecoded_T, _fp_sdq_io_enq_bits_data_prevRecoded_T_1}; // @[FPU.scala:441:28, :442:10, :443:10]
wire [32:0] fp_sdq_io_enq_bits_data_prevRecoded = {fp_sdq_io_enq_bits_data_prevRecoded_hi, _fp_sdq_io_enq_bits_data_prevRecoded_T_2}; // @[FPU.scala:441:28, :444:10]
wire [8:0] fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_exp = fp_sdq_io_enq_bits_data_prevRecoded[31:23]; // @[FPU.scala:441:28]
wire [2:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isZero_T = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isZero = _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
wire fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isZero_0 = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isSpecial_T = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isSpecial = &_fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
wire _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [9:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [24:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [9:0] fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isNaN_T = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isInf_T = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isNaN_T_1 = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isSpecial & _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isNaN = _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isInf_T_1 = ~_fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isInf_T_2 = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isSpecial & _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isInf = _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sign_T = fp_sdq_io_enq_bits_data_prevRecoded[32]; // @[FPU.scala:441:28]
assign fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sign = _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sExp_T = {1'h0, fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sExp = _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sig_T = ~fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sig_T_1 = {1'h0, _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [22:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sig_T_2 = fp_sdq_io_enq_bits_data_prevRecoded[22:0]; // @[FPU.scala:441:28]
assign _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sig_T_3 = {_fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sig_T_1, _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sig = _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
wire fp_sdq_io_enq_bits_data_prevUnrecoded_isSubnormal = $signed(fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sExp) < 10'sh82; // @[rawFloatFromRecFN.scala:55:23]
wire [4:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_denormShiftDist_T = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sExp[4:0]; // @[rawFloatFromRecFN.scala:55:23]
wire [5:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_denormShiftDist_T_1 = 6'h1 - {1'h0, _fp_sdq_io_enq_bits_data_prevUnrecoded_denormShiftDist_T}; // @[fNFromRecFN.scala:52:{35,47}]
wire [4:0] fp_sdq_io_enq_bits_data_prevUnrecoded_denormShiftDist = _fp_sdq_io_enq_bits_data_prevUnrecoded_denormShiftDist_T_1[4:0]; // @[fNFromRecFN.scala:52:35]
wire [23:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_denormFract_T = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sig[24:1]; // @[rawFloatFromRecFN.scala:55:23]
wire [23:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_denormFract_T_1 = _fp_sdq_io_enq_bits_data_prevUnrecoded_denormFract_T >> fp_sdq_io_enq_bits_data_prevUnrecoded_denormShiftDist; // @[fNFromRecFN.scala:52:35, :53:{38,42}]
wire [22:0] fp_sdq_io_enq_bits_data_prevUnrecoded_denormFract = _fp_sdq_io_enq_bits_data_prevUnrecoded_denormFract_T_1[22:0]; // @[fNFromRecFN.scala:53:{42,60}]
wire [7:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sExp[7:0]; // @[rawFloatFromRecFN.scala:55:23]
wire [8:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T_1 = {1'h0, _fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T} - 9'h81; // @[fNFromRecFN.scala:58:{27,45}]
wire [7:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T_2 = _fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T_1[7:0]; // @[fNFromRecFN.scala:58:45]
wire [7:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T_3 = fp_sdq_io_enq_bits_data_prevUnrecoded_isSubnormal ? 8'h0 : _fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T_2; // @[fNFromRecFN.scala:51:38, :56:16, :58:45]
wire _fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T_4 = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isNaN | fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire [7:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T_5 = {8{_fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T_4}}; // @[fNFromRecFN.scala:60:{21,44}]
wire [7:0] fp_sdq_io_enq_bits_data_prevUnrecoded_expOut = _fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T_3 | _fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T_5; // @[fNFromRecFN.scala:56:16, :60:{15,21}]
wire [22:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_fractOut_T = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sig[22:0]; // @[rawFloatFromRecFN.scala:55:23]
wire [22:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_fractOut_T_1 = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isInf ? 23'h0 : _fp_sdq_io_enq_bits_data_prevUnrecoded_fractOut_T; // @[rawFloatFromRecFN.scala:55:23]
wire [22:0] fp_sdq_io_enq_bits_data_prevUnrecoded_fractOut = fp_sdq_io_enq_bits_data_prevUnrecoded_isSubnormal ? fp_sdq_io_enq_bits_data_prevUnrecoded_denormFract : _fp_sdq_io_enq_bits_data_prevUnrecoded_fractOut_T_1; // @[fNFromRecFN.scala:51:38, :53:60, :62:16, :64:20]
wire [8:0] fp_sdq_io_enq_bits_data_prevUnrecoded_hi = {fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sign, fp_sdq_io_enq_bits_data_prevUnrecoded_expOut}; // @[rawFloatFromRecFN.scala:55:23]
wire [31:0] fp_sdq_io_enq_bits_data_prevUnrecoded = {fp_sdq_io_enq_bits_data_prevUnrecoded_hi, fp_sdq_io_enq_bits_data_prevUnrecoded_fractOut}; // @[fNFromRecFN.scala:62:16, :66:12]
wire [31:0] _fp_sdq_io_enq_bits_data_T = fp_sdq_io_enq_bits_data_unrecoded[63:32]; // @[FPU.scala:446:21]
wire [2:0] _fp_sdq_io_enq_bits_data_T_1 = io_req_bits_rs2_data_0[63:61]; // @[FPU.scala:249:25]
wire _fp_sdq_io_enq_bits_data_T_2 = &_fp_sdq_io_enq_bits_data_T_1; // @[FPU.scala:249:{25,56}]
wire [31:0] _fp_sdq_io_enq_bits_data_T_3 = fp_sdq_io_enq_bits_data_unrecoded[31:0]; // @[FPU.scala:446:81]
wire [31:0] _fp_sdq_io_enq_bits_data_T_4 = _fp_sdq_io_enq_bits_data_T_2 ? fp_sdq_io_enq_bits_data_prevUnrecoded : _fp_sdq_io_enq_bits_data_T_3; // @[FPU.scala:249:56, :446:{44,81}]
wire [63:0] _fp_sdq_io_enq_bits_data_T_5 = {_fp_sdq_io_enq_bits_data_T, _fp_sdq_io_enq_bits_data_T_4}; // @[FPU.scala:446:{10,21,44}] |
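// Note (added comment, not generated output): the wires above unrecode the 65-bit
// recoded FP operand in rs2 into its IEEE-754 memory image for the fp_sdq enqueue
// data. The full operand is unrecoded as a double, a 33-bit recoded single
// (rebuilt from bits 31, 52 and 30:0) is unrecoded in parallel, and the low 32
// bits of the result take the single-precision image when exponent bits [63:61]
// are all ones, otherwise the low half of the double's image.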
Generate the Verilog code corresponding to the following Chisel files.
File Serdes.scala:
package testchipip.serdes
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config._
class GenericSerializer[T <: Data](t: T, flitWidth: Int) extends Module {
override def desiredName = s"GenericSerializer_${t.typeName}w${t.getWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(t))
val out = Decoupled(new Flit(flitWidth))
val busy = Output(Bool())
})
val dataBits = t.getWidth.max(flitWidth)
val dataBeats = (dataBits - 1) / flitWidth + 1
require(dataBeats >= 1)
val data = Reg(Vec(dataBeats, UInt(flitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready && beat === 0.U
io.out.valid := io.in.valid || beat =/= 0.U
io.out.bits.flit := Mux(beat === 0.U, io.in.bits.asUInt, data(beat))
when (io.out.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) {
data := io.in.bits.asTypeOf(Vec(dataBeats, UInt(flitWidth.W)))
data(0) := DontCare // unused, DCE this
}
}
io.busy := io.out.valid
}
class GenericDeserializer[T <: Data](t: T, flitWidth: Int) extends Module {
override def desiredName = s"GenericDeserializer_${t.typeName}w${t.getWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(t)
val busy = Output(Bool())
})
val dataBits = t.getWidth.max(flitWidth)
val dataBeats = (dataBits - 1) / flitWidth + 1
require(dataBeats >= 1)
val data = Reg(Vec(dataBeats-1, UInt(flitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready || beat =/= (dataBeats-1).U
io.out.valid := io.in.valid && beat === (dataBeats-1).U
io.out.bits := (if (dataBeats == 1) {
io.in.bits.flit.asTypeOf(t)
} else {
Cat(io.in.bits.flit, data.asUInt).asTypeOf(t)
})
when (io.in.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
if (dataBeats > 1) {
when (beat =/= (dataBeats-1).U) {
data(beat(log2Ceil(dataBeats-1)-1,0)) := io.in.bits.flit
}
}
}
io.busy := beat =/= 0.U
}
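// Illustrative sketch (not part of the original testchipip source): a loopback
// pairing the two converters above. A 64-bit word is cut into
// (64 - 1) / 16 + 1 = 4 flit beats, least-significant chunk first, and
// reassembled on the other side. The module name and widths are assumptions
// chosen only for this example.
class GenericSerdesLoopbackExample extends Module {
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(UInt(64.W)))
    val out = Decoupled(UInt(64.W))
  })
  val ser = Module(new GenericSerializer(UInt(64.W), 16))
  val des = Module(new GenericDeserializer(UInt(64.W), 16))
  ser.io.in <> io.in      // a new word is accepted only when the serializer is idle
  des.io.in <> ser.io.out // one 16-bit flit per beat
  io.out <> des.io.out    // the word pops out after the fourth beat
}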
class FlitToPhit(flitWidth: Int, phitWidth: Int) extends Module {
override def desiredName = s"FlitToPhit_f${flitWidth}_p${phitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Phit(phitWidth))
})
require(flitWidth >= phitWidth)
val dataBeats = (flitWidth - 1) / phitWidth + 1
val data = Reg(Vec(dataBeats-1, UInt(phitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready && beat === 0.U
io.out.valid := io.in.valid || beat =/= 0.U
io.out.bits.phit := (if (dataBeats == 1) io.in.bits.flit else Mux(beat === 0.U, io.in.bits.flit, data(beat-1.U)))
when (io.out.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) {
data := io.in.bits.asTypeOf(Vec(dataBeats, UInt(phitWidth.W))).tail
}
}
}
object FlitToPhit {
def apply(flit: DecoupledIO[Flit], phitWidth: Int): DecoupledIO[Phit] = {
val flit2phit = Module(new FlitToPhit(flit.bits.flitWidth, phitWidth))
flit2phit.io.in <> flit
flit2phit.io.out
}
}
class PhitToFlit(flitWidth: Int, phitWidth: Int) extends Module {
override def desiredName = s"PhitToFlit_p${phitWidth}_f${flitWidth}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Phit(phitWidth)))
val out = Decoupled(new Flit(flitWidth))
})
require(flitWidth >= phitWidth)
val dataBeats = (flitWidth - 1) / phitWidth + 1
val data = Reg(Vec(dataBeats-1, UInt(phitWidth.W)))
val beat = RegInit(0.U(log2Ceil(dataBeats).W))
io.in.ready := io.out.ready || beat =/= (dataBeats-1).U
io.out.valid := io.in.valid && beat === (dataBeats-1).U
io.out.bits.flit := (if (dataBeats == 1) io.in.bits.phit else Cat(io.in.bits.phit, data.asUInt))
when (io.in.fire) {
beat := Mux(beat === (dataBeats-1).U, 0.U, beat + 1.U)
if (dataBeats > 1) {
when (beat =/= (dataBeats-1).U) {
data(beat) := io.in.bits.phit
}
}
}
}
object PhitToFlit {
def apply(phit: DecoupledIO[Phit], flitWidth: Int): DecoupledIO[Flit] = {
val phit2flit = Module(new PhitToFlit(flitWidth, phit.bits.phitWidth))
phit2flit.io.in <> phit
phit2flit.io.out
}
def apply(phit: ValidIO[Phit], flitWidth: Int): ValidIO[Flit] = {
val phit2flit = Module(new PhitToFlit(flitWidth, phit.bits.phitWidth))
phit2flit.io.in.valid := phit.valid
phit2flit.io.in.bits := phit.bits
when (phit.valid) { assert(phit2flit.io.in.ready) }
val out = Wire(Valid(new Flit(flitWidth)))
out.valid := phit2flit.io.out.valid
out.bits := phit2flit.io.out.bits
phit2flit.io.out.ready := true.B
out
}
}
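// Added note: FlitToPhit and PhitToFlit agree on chunk order. The sending side
// emits the least-significant phitWidth bits of the flit on beat 0 and the
// remaining chunks (held in `data`) on later beats; the receiving side stores
// earlier phits in the low-order positions of `data` and concatenates the final
// phit on top, so the flit is reconstructed exactly.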
class PhitArbiter(phitWidth: Int, flitWidth: Int, channels: Int) extends Module {
override def desiredName = s"PhitArbiter_p${phitWidth}_f${flitWidth}_n${channels}"
val io = IO(new Bundle {
val in = Flipped(Vec(channels, Decoupled(new Phit(phitWidth))))
val out = Decoupled(new Phit(phitWidth))
})
if (channels == 1) {
io.out <> io.in(0)
} else {
val headerWidth = log2Ceil(channels)
val headerBeats = (headerWidth - 1) / phitWidth + 1
val flitBeats = (flitWidth - 1) / phitWidth + 1
val beats = headerBeats + flitBeats
val beat = RegInit(0.U(log2Ceil(beats).W))
val chosen_reg = Reg(UInt(headerWidth.W))
val chosen_prio = PriorityEncoder(io.in.map(_.valid))
val chosen = Mux(beat === 0.U, chosen_prio, chosen_reg)
val header_idx = if (headerBeats == 1) 0.U else beat(log2Ceil(headerBeats)-1,0)
io.out.valid := VecInit(io.in.map(_.valid))(chosen)
io.out.bits.phit := Mux(beat < headerBeats.U,
chosen.asTypeOf(Vec(headerBeats, UInt(phitWidth.W)))(header_idx),
VecInit(io.in.map(_.bits.phit))(chosen))
for (i <- 0 until channels) {
io.in(i).ready := io.out.ready && beat >= headerBeats.U && chosen_reg === i.U
}
when (io.out.fire) {
beat := Mux(beat === (beats-1).U, 0.U, beat + 1.U)
when (beat === 0.U) { chosen_reg := chosen_prio }
}
}
}
class PhitDemux(phitWidth: Int, flitWidth: Int, channels: Int) extends Module {
override def desiredName = s"PhitDemux_p${phitWidth}_f${flitWidth}_n${channels}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Phit(phitWidth)))
val out = Vec(channels, Decoupled(new Phit(phitWidth)))
})
if (channels == 1) {
io.out(0) <> io.in
} else {
val headerWidth = log2Ceil(channels)
val headerBeats = (headerWidth - 1) / phitWidth + 1
val flitBeats = (flitWidth - 1) / phitWidth + 1
val beats = headerBeats + flitBeats
val beat = RegInit(0.U(log2Ceil(beats).W))
val channel_vec = Reg(Vec(headerBeats, UInt(phitWidth.W)))
val channel = channel_vec.asUInt(log2Ceil(channels)-1,0)
val header_idx = if (headerBeats == 1) 0.U else beat(log2Ceil(headerBeats)-1,0)
io.in.ready := beat < headerBeats.U || VecInit(io.out.map(_.ready))(channel)
for (c <- 0 until channels) {
io.out(c).valid := io.in.valid && beat >= headerBeats.U && channel === c.U
io.out(c).bits.phit := io.in.bits.phit
}
when (io.in.fire) {
beat := Mux(beat === (beats-1).U, 0.U, beat + 1.U)
when (beat < headerBeats.U) {
channel_vec(header_idx) := io.in.bits.phit
}
}
}
}
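// Worked example of the framing arithmetic above (illustrative numbers only):
// with channels = 4 and phitWidth = 1, headerWidth = log2Ceil(4) = 2 and
// headerBeats = (2 - 1) / 1 + 1 = 2, so two header phits carry the channel
// index before each flit; with flitWidth = 32, flitBeats = 32 and every flit
// occupies 34 beats on the shared link. PhitDemux captures the same two header
// beats into channel_vec and then routes the following payload beats to the
// matching output.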
class DecoupledFlitToCreditedFlit(flitWidth: Int, bufferSz: Int) extends Module {
override def desiredName = s"DecoupledFlitToCreditedFlit_f${flitWidth}_b${bufferSz}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Flit(flitWidth))
val credit = Flipped(Decoupled(new Flit(flitWidth)))
})
val creditWidth = log2Ceil(bufferSz)
require(creditWidth <= flitWidth)
val credits = RegInit(0.U((creditWidth+1).W))
val credit_incr = io.out.fire
val credit_decr = io.credit.fire
when (credit_incr || credit_decr) {
credits := credits + credit_incr - Mux(io.credit.valid, io.credit.bits.flit +& 1.U, 0.U)
}
io.out.valid := io.in.valid && credits < bufferSz.U
io.out.bits.flit := io.in.bits.flit
io.in.ready := io.out.ready && credits < bufferSz.U
io.credit.ready := true.B
}
class CreditedFlitToDecoupledFlit(flitWidth: Int, bufferSz: Int) extends Module {
override def desiredName = s"CreditedFlitToDecoupledFlit_f${flitWidth}_b${bufferSz}"
val io = IO(new Bundle {
val in = Flipped(Decoupled(new Flit(flitWidth)))
val out = Decoupled(new Flit(flitWidth))
val credit = Decoupled(new Flit(flitWidth))
})
val creditWidth = log2Ceil(bufferSz)
require(creditWidth <= flitWidth)
val buffer = Module(new Queue(new Flit(flitWidth), bufferSz))
val credits = RegInit(0.U((creditWidth+1).W))
val credit_incr = buffer.io.deq.fire
val credit_decr = io.credit.fire
when (credit_incr || credit_decr) {
credits := credit_incr + Mux(credit_decr, 0.U, credits)
}
buffer.io.enq.valid := io.in.valid
buffer.io.enq.bits := io.in.bits
io.in.ready := true.B
when (io.in.valid) { assert(buffer.io.enq.ready) }
io.out <> buffer.io.deq
io.credit.valid := credits =/= 0.U
io.credit.bits.flit := credits - 1.U
}
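// Hedged sketch (not part of the original file): the two credited converters
// above are intended to sit at opposite ends of a latency-insensitive link and
// must agree on bufferSz, since the sender's credit counter assumes exactly
// that much storage in the receiver's Queue. The example module name and the
// flitWidth/bufferSz values are assumptions for illustration.
class CreditedLinkExample extends Module {
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(new Flit(32)))
    val out = Decoupled(new Flit(32))
  })
  val tx = Module(new DecoupledFlitToCreditedFlit(32, 8))
  val rx = Module(new CreditedFlitToDecoupledFlit(32, 8))
  tx.io.in <> io.in
  rx.io.in <> tx.io.out        // forward flits, throttled by available credits
  tx.io.credit <> rx.io.credit // credits returned as the receiver drains
  io.out <> rx.io.out
}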
| module GenericSerializer_TLBeatw87_f32( // @[Serdes.scala:8:7]
input clock, // @[Serdes.scala:8:7]
input reset, // @[Serdes.scala:8:7]
output io_in_ready, // @[Serdes.scala:10:14]
input io_in_bits_head, // @[Serdes.scala:10:14]
input io_in_bits_tail, // @[Serdes.scala:10:14]
input io_out_ready, // @[Serdes.scala:10:14]
output io_out_valid, // @[Serdes.scala:10:14]
output [31:0] io_out_bits_flit, // @[Serdes.scala:10:14]
output io_busy // @[Serdes.scala:10:14]
);
wire io_out_valid_0; // @[Serdes.scala:8:7]
wire io_in_bits_head_0 = io_in_bits_head; // @[Serdes.scala:8:7]
wire io_in_bits_tail_0 = io_in_bits_tail; // @[Serdes.scala:8:7]
wire io_out_ready_0 = io_out_ready; // @[Serdes.scala:8:7]
wire [0:0][31:0] _GEN = '{32'h0};
wire [84:0] io_in_bits_payload = 85'h0; // @[Serdes.scala:8:7, :10:14]
wire io_in_valid = 1'h0; // @[Serdes.scala:8:7, :10:14]
wire _io_in_ready_T_1; // @[Serdes.scala:22:31]
wire _io_out_valid_T_1; // @[Serdes.scala:23:31]
wire io_busy_0 = io_out_valid_0; // @[Serdes.scala:8:7]
wire io_in_ready_0; // @[Serdes.scala:8:7]
wire [31:0] io_out_bits_flit_0; // @[Serdes.scala:8:7]
reg [31:0] data_1; // @[Serdes.scala:19:17]
reg [31:0] data_2; // @[Serdes.scala:19:17]
reg [1:0] beat; // @[Serdes.scala:20:21]
wire _io_in_ready_T = ~(|beat); // @[Serdes.scala:20:21, :22:39]
assign _io_in_ready_T_1 = io_out_ready_0 & _io_in_ready_T; // @[Serdes.scala:8:7, :22:{31,39}]
assign io_in_ready_0 = _io_in_ready_T_1; // @[Serdes.scala:8:7, :22:31]
wire _io_out_valid_T = |beat; // @[Serdes.scala:20:21, :22:39, :23:39]
assign _io_out_valid_T_1 = _io_out_valid_T; // @[Serdes.scala:23:{31,39}]
assign io_out_valid_0 = _io_out_valid_T_1; // @[Serdes.scala:8:7, :23:31]
wire _io_out_bits_flit_T = ~(|beat); // @[Serdes.scala:20:21, :22:39, :24:32]
wire [85:0] _GEN_0 = {85'h0, io_in_bits_head_0}; // @[Serdes.scala:8:7, :10:14, :24:52]
wire [85:0] io_out_bits_flit_hi; // @[Serdes.scala:24:52]
assign io_out_bits_flit_hi = _GEN_0; // @[Serdes.scala:24:52]
wire [85:0] hi; // @[Serdes.scala:29:34]
assign hi = _GEN_0; // @[Serdes.scala:24:52, :29:34]
wire [86:0] _io_out_bits_flit_T_1 = {io_out_bits_flit_hi, io_in_bits_tail_0}; // @[Serdes.scala:8:7, :24:52]
wire [3:0][31:0] _GEN_1 = {_GEN, {{data_2}, {data_1}, {32'h0}}}; // @[Serdes.scala:19:17, :24:26]
wire [86:0] _io_out_bits_flit_T_2 = _io_out_bits_flit_T ? _io_out_bits_flit_T_1 : {55'h0, _GEN_1[beat]}; // @[Serdes.scala:20:21, :24:{26,32,52}]
assign io_out_bits_flit_0 = _io_out_bits_flit_T_2[31:0]; // @[Serdes.scala:8:7, :24:{20,26}]
wire _beat_T = beat == 2'h2; // @[Serdes.scala:20:21, :27:22]
wire [2:0] _beat_T_1 = {1'h0, beat} + 3'h1; // @[Serdes.scala:8:7, :10:14, :20:21, :27:53]
wire [1:0] _beat_T_2 = _beat_T_1[1:0]; // @[Serdes.scala:27:53]
wire [1:0] _beat_T_3 = _beat_T ? 2'h0 : _beat_T_2; // @[Serdes.scala:27:{16,22,53}]
wire _T = io_out_ready_0 & io_out_valid_0; // @[Decoupled.scala:51:35]
always @(posedge clock) begin // @[Serdes.scala:8:7]
if (_T & ~(|beat)) begin // @[Decoupled.scala:51:35]
data_1 <= hi[62:31]; // @[Serdes.scala:19:17, :29:34]
data_2 <= {9'h0, hi[85:63]}; // @[Serdes.scala:19:17, :29:34]
end
if (reset) // @[Serdes.scala:8:7]
beat <= 2'h0; // @[Serdes.scala:20:21]
else if (_T) // @[Decoupled.scala:51:35]
beat <= _beat_T_3; // @[Serdes.scala:20:21, :27:16]
end // @[Serdes.scala:8:7]
end // @[Serdes.scala:8:7]
assign io_in_ready = io_in_ready_0; // @[Serdes.scala:8:7]
assign io_out_valid = io_out_valid_0; // @[Serdes.scala:8:7]
assign io_out_bits_flit = io_out_bits_flit_0; // @[Serdes.scala:8:7]
assign io_busy = io_busy_0; // @[Serdes.scala:8:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
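// Hedged usage sketch (not part of the original file): pushing a 1-bit level
// through a 3-deep asynchronously reset register chain, the typical structure
// for the timing/CDC applications described above. Names are illustrative.
class AsyncResetShiftRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  io.q := AsyncResetShiftReg(io.d, depth = 3, init = 0, name = Some("example_pipe"))
}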
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
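// Added note: with the default AsyncQueueParams(depth = 8, sync = 3,
// narrow = false), bits = log2Ceil(8) = 3, so the ridx/widx Gray pointers are
// bits + 1 = 4 bits wide, and wires = depth = 8, i.e. all eight mem entries
// cross the clock boundary in parallel; narrow = true would shrink that to a
// single entry selected on the source side.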
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
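// Added note: the encoding above is gray = bin ^ (bin >> 1). For a 3-bit
// counter the values 0..7 map to 000, 001, 011, 010, 110, 111, 101, 100;
// consecutive codes differ in exactly one bit, so a pointer sampled
// mid-transition in the other clock domain resolves to either the old or the
// new value, never a torn mixture.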
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
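// Added note: XOR-ing ridx with (depth | depth >> 1) flips its two
// most-significant bits, which for Gray codes is exactly the encoding of
// ridx + depth; the comparison above therefore deasserts ready precisely when
// the queue already holds depth entries.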
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because dequeue can occur on the receiving side,
// then reset allowed to happen, but write side cannot know that dequeue
// occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected valued is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_88( // @[AsyncQueue.scala:58:7]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in = 1'h1; // @[ShiftReg.scala:45:23]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_89 io_out_source_valid_0 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
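// Added note: visible() holds when, for every client whose sourceId range
// contains this source, the address lies within at least one of that client's
// visibility regions; clients that could not have issued the source are
// ignored via the first disjunct.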
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
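// Added note: legal_source uses Mux1H over the one-hot client match to pick
// the owning client's sourceId.start, so a 'B' channel beat is only considered
// legal when bundle.source equals the first source ID of the client range that
// contains it.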
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
// This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
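// Encoding sketch for the in-flight side tables above (illustrative only, not part of
// the original file): each source ID owns a small field holding (value << 1) | 1, so a
// stored 0 means "nothing in flight" and the lookup shifts right by 1 to recover the
// original value. For example, an in-flight Get (opcode 4) is stored as
// (4 << 1) | 1 = 9, and a_opcode_lookup recovers 9 >> 1 = 4.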
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
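// Usage sketch (illustrative only; the signal names below are hypothetical): gather all
// ready/valid-style conditions once, then derive each interface's handshake by excluding
// that interface's own signal, which avoids combinational ready/valid loops.
//   val helper = DecoupledHelper(io.in.valid, io.out.ready, bufferHasSpace)
//   io.in.ready  := helper.fire(io.in.valid)   // every condition except in.valid
//   io.out.valid := helper.fire(io.out.ready)  // every condition except out.ready
//   val doEnq    = helper.fire()               // all conditions together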
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
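// Usage sketch (illustrative only; keys and values are hypothetical): select a
// (writeEnable, latency) pair by opcode, falling back to the default when no key
// matches. The cascade is built in reverse, so earlier entries take priority.
//   val (wen, lat) = MuxTLookup(opcode, (false.B, 0.U), Seq(
//     TLMessages.Get         -> (false.B, 2.U),
//     TLMessages.PutFullData -> (true.B,  1.U)))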
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
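// Usage sketch (illustrative only; signal names are hypothetical): produce the byte-lane
// mask for an access of 2^lgSize bytes within an 8-byte beat.
//   val mask = MaskGen(addr(2, 0), lgSize, beatBytes = 8)
// e.g. addr_lo = 0x4 with lgSize = 2 selects the upper word: mask = 11110000.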
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
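// Usage sketch (illustrative only; the plusarg names are hypothetical): read a
// runtime-tunable value, and arm a simulation-only timeout that stays disabled when the
// argument is left at its default of 0.
//   val threshold = PlusArg("my_threshold", default = 16, docstring = "request threshold")
//   PlusArg.timeout("my_timeout", default = 0, docstring = "cycles before giving up")(cycles)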
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
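// Examples (evaluated at elaboration time; illustrative only):
//   "BigBus Name".underscore    // "big_bus_name"
//   "BigBus Name".kebab         // "bigbus-name"
//   "monitor".named(Some("L2")) // "monitor_named_L2"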
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
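// Worked examples for the helpers above (illustrative only): with n = 5,
//   3.U.addWrap(4.U, 5)  // (3 + 4) % 5 = 2
//   1.U.subWrap(3.U, 5)  // (1 - 3) % 5 = 3
// and rotateRight(2) on the 4-bit value b0110 yields b1001.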
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
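// Worked example (illustrative only): for the 5-bit value b00100,
//   leftOR  gives b11100 (each set bit smears toward the MSB),
//   rightOR gives b00111 (each set bit smears toward the LSB).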
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
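// Usage sketch (illustrative only): unlike Seq.groupBy, the result preserves the order in
// which keys are first encountered, so generated hardware is stable across runs.
//   groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2)  // Seq(1 -> Seq(1, 3), 0 -> Seq(2, 4))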
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
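// Usage sketch (illustrative only): the response tables above map an A/B-channel opcode
// to its expected reply, e.g. adResponse indexed by Get (4) yields AccessAckData, and
// bcResponse indexed by Probe (6) yields ProbeAck.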
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on an inwards path to) the global point of serialization.
* (B)ranch: the agent is on an outwards path from the point of serialization and may hold a read-only copy.
* (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
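// Worked example (illustrative only): a manager answering an Acquire with grow param NtoB
// and granting read-only permission sends Grant with param = toB (a cap type), while a
// client that is probed at Branch and stays there replies ProbeAck with the report
// param BtoB.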
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A half-open range [start, end); it may be empty (start == end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
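// Editor's note: illustrative sketch, not part of the original source. It exercises the
// half-open [start, end) semantics above with plain Scala values; all names here are
// hypothetical.
object IdRangeExample
{
  private val a = IdRange(0, 4)            // ids 0,1,2,3
  private val b = IdRange(4, 8)            // ids 4,5,6,7
  require(!(a overlaps b))                 // half-open: id 4 belongs only to b
  require(a.contains(3) && !a.contains(4))
  require(a.shift(4) == b)
  require(IdRange.overlaps(Seq(a, b, IdRange(6, 10))).isDefined) // ids 6..7 collide with b
}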
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
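// Editor's note: illustrative sketch, not part of the original source. `intersect` keeps
// only the sizes both terms support, while `mincover` is the smallest legal range
// containing both (and possibly sizes neither term supports). Names are hypothetical.
object TransferSizesExample
{
  private val get = TransferSizes(1, 64)
  private val put = TransferSizes(4, 32)
  require(get.intersect(put) == TransferSizes(4, 32))
  require(get.mincover(TransferSizes.none) == get)
  require(TransferSizes(1, 4).mincover(TransferSizes(16, 64)) == TransferSizes(1, 64)) // now also covers 8
}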
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
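// Editor's note: illustrative sketch, not part of the original source. It spells out the
// base/mask semantics documented above and shows `misaligned` splitting an arbitrary
// window into aligned sets; the object and value names are hypothetical.
object AddressSetExample
{
  private val dev = AddressSet(0x1000, 0xf0f)   // 0x1000-0x100f, 0x1100-0x110f, ...
  require(dev.contains(BigInt(0x1104)))         // masked bits may differ from the base
  require(!dev.contains(BigInt(0x1010)))        // unmasked bit 4 must match the base
  // A 2 KiB window at 0x3000 is already aligned, so it maps to a single set:
  require(AddressSet.misaligned(0x3000, 0x800) == Seq(AddressSet(0x3000, 0x7ff)))
}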
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
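// Editor's note: illustrative check, not part of the original source. The presets trade
// depth against combinational `flow`/`pipe` paths; only a non-empty, non-flow buffer adds
// a cycle of latency. The object name is hypothetical.
object BufferParamsExample
{
  require(BufferParams.default.latency == 1) // Queue of depth 2
  require(BufferParams.flow.latency == 0)    // depth 1, combinationally bypassed when empty
  require(BufferParams.none.latency == 0)    // pass-through, no queue instantiated
}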
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
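// Editor's note: illustrative arithmetic, not part of the original source, behind
// numBeats1/firstlastHelper above: with 8-byte beats, a 2^6 = 64-byte data message
// occupies 8 beats, so the beats-minus-one counter starts at 7 and counts down to 0.
// The object name and the chosen beatBytes/lgSize values are hypothetical.
object BeatCountExample
{
  private val beatBytes = 8
  private val lgSize    = 6                     // a 64-byte transfer
  private val beats     = (1 << lgSize) / beatBytes
  require(beats == 8 && beats - 1 == 7)         // matches UIntToOH1(size) >> log2Ceil(beatBytes)
}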
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_103( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [1:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [13:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [11:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [13:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [1:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [13:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [11:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [13:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire _source_ok_T = 1'h0; // @[Parameters.scala:54:10]
wire _source_ok_T_6 = 1'h0; // @[Parameters.scala:54:10]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire a_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire a_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire a_first_count = 1'h0; // @[Edges.scala:234:25]
wire d_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count = 1'h0; // @[Edges.scala:234:25]
wire a_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59]
wire a_first_beats1_1 = 1'h0; // @[Edges.scala:221:14]
wire a_first_count_1 = 1'h0; // @[Edges.scala:234:25]
wire d_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1_1 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count_1 = 1'h0; // @[Edges.scala:234:25]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire c_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_first_count_T = 1'h0; // @[Edges.scala:234:27]
wire c_first_count = 1'h0; // @[Edges.scala:234:25]
wire _c_first_counter_T = 1'h0; // @[Edges.scala:236:21]
wire d_first_beats1_decode_2 = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1_2 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count_2 = 1'h0; // @[Edges.scala:234:25]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire _source_ok_T_7 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_8 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:54:67]
wire _a_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire a_first_last = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last = 1'h1; // @[Edges.scala:232:33]
wire _a_first_last_T_3 = 1'h1; // @[Edges.scala:232:43]
wire a_first_last_1 = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_3 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last_1 = 1'h1; // @[Edges.scala:232:33]
wire c_first_counter1 = 1'h1; // @[Edges.scala:230:28]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_5 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last_2 = 1'h1; // @[Edges.scala:232:33]
wire [1:0] _c_first_counter1_T = 2'h3; // @[Edges.scala:230:28]
wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7]
wire [1:0] _c_first_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_first_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_first_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_first_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_set_wo_ready_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_set_wo_ready_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_opcodes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_opcodes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_sizes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_sizes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_opcodes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_opcodes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_sizes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_sizes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_probe_ack_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_probe_ack_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_probe_ack_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_probe_ack_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_4_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_5_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_first_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_first_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_first_WIRE_2_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_first_WIRE_3_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_set_wo_ready_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_set_wo_ready_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_set_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_set_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_opcodes_set_interm_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_opcodes_set_interm_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_sizes_set_interm_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_sizes_set_interm_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_opcodes_set_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_opcodes_set_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_sizes_set_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_sizes_set_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_probe_ack_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_probe_ack_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _c_probe_ack_WIRE_2_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _c_probe_ack_WIRE_3_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _same_cycle_resp_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _same_cycle_resp_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _same_cycle_resp_WIRE_2_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _same_cycle_resp_WIRE_3_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [11:0] _same_cycle_resp_WIRE_4_bits_address = 12'h0; // @[Bundles.scala:265:74]
wire [11:0] _same_cycle_resp_WIRE_5_bits_address = 12'h0; // @[Bundles.scala:265:61]
wire [13:0] _c_first_WIRE_bits_source = 14'h0; // @[Bundles.scala:265:74]
wire [13:0] _c_first_WIRE_1_bits_source = 14'h0; // @[Bundles.scala:265:61]
wire [13:0] _c_first_WIRE_2_bits_source = 14'h0; // @[Bundles.scala:265:74]
wire [13:0] _c_first_WIRE_3_bits_source = 14'h0; // @[Bundles.scala:265:61]
wire [13:0] _c_set_wo_ready_WIRE_bits_source = 14'h0; // @[Bundles.scala:265:74]
wire [13:0] _c_set_wo_ready_WIRE_1_bits_source = 14'h0; // @[Bundles.scala:265:61]
wire [13:0] _c_set_WIRE_bits_source = 14'h0; // @[Bundles.scala:265:74]
wire [13:0] _c_set_WIRE_1_bits_source = 14'h0; // @[Bundles.scala:265:61]
wire [13:0] _c_opcodes_set_interm_WIRE_bits_source = 14'h0; // @[Bundles.scala:265:74]
wire [13:0] _c_opcodes_set_interm_WIRE_1_bits_source = 14'h0; // @[Bundles.scala:265:61]
wire [13:0] _c_sizes_set_interm_WIRE_bits_source = 14'h0; // @[Bundles.scala:265:74]
wire [13:0] _c_sizes_set_interm_WIRE_1_bits_source = 14'h0; // @[Bundles.scala:265:61]
wire [13:0] _c_opcodes_set_WIRE_bits_source = 14'h0; // @[Bundles.scala:265:74]
wire [13:0] _c_opcodes_set_WIRE_1_bits_source = 14'h0; // @[Bundles.scala:265:61]
wire [13:0] _c_sizes_set_WIRE_bits_source = 14'h0; // @[Bundles.scala:265:74]
wire [13:0] _c_sizes_set_WIRE_1_bits_source = 14'h0; // @[Bundles.scala:265:61]
wire [13:0] _c_probe_ack_WIRE_bits_source = 14'h0; // @[Bundles.scala:265:74]
wire [13:0] _c_probe_ack_WIRE_1_bits_source = 14'h0; // @[Bundles.scala:265:61]
wire [13:0] _c_probe_ack_WIRE_2_bits_source = 14'h0; // @[Bundles.scala:265:74]
wire [13:0] _c_probe_ack_WIRE_3_bits_source = 14'h0; // @[Bundles.scala:265:61]
wire [13:0] _same_cycle_resp_WIRE_bits_source = 14'h0; // @[Bundles.scala:265:74]
wire [13:0] _same_cycle_resp_WIRE_1_bits_source = 14'h0; // @[Bundles.scala:265:61]
wire [13:0] _same_cycle_resp_WIRE_2_bits_source = 14'h0; // @[Bundles.scala:265:74]
wire [13:0] _same_cycle_resp_WIRE_3_bits_source = 14'h0; // @[Bundles.scala:265:61]
wire [13:0] _same_cycle_resp_WIRE_4_bits_source = 14'h0; // @[Bundles.scala:265:74]
wire [13:0] _same_cycle_resp_WIRE_5_bits_source = 14'h0; // @[Bundles.scala:265:61]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_beats1_decode_T_2 = 3'h0; // @[package.scala:243:46]
wire [2:0] c_sizes_set_interm = 3'h0; // @[Monitor.scala:755:40]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_T = 3'h0; // @[Monitor.scala:766:51]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [131073:0] _c_sizes_set_T_1 = 131074'h0; // @[Monitor.scala:768:52]
wire [16:0] _c_opcodes_set_T = 17'h0; // @[Monitor.scala:767:79]
wire [16:0] _c_sizes_set_T = 17'h0; // @[Monitor.scala:768:77]
wire [131074:0] _c_opcodes_set_T_1 = 131075'h0; // @[Monitor.scala:767:54]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] _c_sizes_set_interm_T_1 = 3'h1; // @[Monitor.scala:766:59]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [16383:0] _c_set_wo_ready_T = 16384'h1; // @[OneHot.scala:58:35]
wire [16383:0] _c_set_T = 16384'h1; // @[OneHot.scala:58:35]
wire [32831:0] c_opcodes_set = 32832'h0; // @[Monitor.scala:740:34]
wire [32831:0] c_sizes_set = 32832'h0; // @[Monitor.scala:741:34]
wire [8207:0] c_set = 8208'h0; // @[Monitor.scala:738:34]
wire [8207:0] c_set_wo_ready = 8208'h0; // @[Monitor.scala:739:34]
wire [2:0] _c_first_beats1_decode_T_1 = 3'h7; // @[package.scala:243:76]
wire [5:0] _c_first_beats1_decode_T = 6'h7; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [13:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [13:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [13:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [13:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [13:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [13:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [13:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [13:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [13:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [13:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [13:0] _source_ok_uncommonBits_T_1 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [13:0] source_ok_uncommonBits = _source_ok_uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_4 = source_ok_uncommonBits < 14'h2010; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_5 = _source_ok_T_4; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_0 = _source_ok_T_5; // @[Parameters.scala:1138:31]
wire [5:0] _GEN = 6'h7 << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [5:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [5:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [2:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [11:0] _is_aligned_T = {9'h0, io_in_a_bits_address_0[2:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 12'h0; // @[Edges.scala:21:{16,24}]
wire [2:0] _mask_sizeOH_T = {1'h0, io_in_a_bits_size_0}; // @[Misc.scala:202:34]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = &io_in_a_bits_size_0; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [13:0] uncommonBits = _uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire [13:0] uncommonBits_1 = _uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire [13:0] uncommonBits_2 = _uncommonBits_T_2; // @[Parameters.scala:52:{29,56}]
wire [13:0] uncommonBits_3 = _uncommonBits_T_3; // @[Parameters.scala:52:{29,56}]
wire [13:0] uncommonBits_4 = _uncommonBits_T_4; // @[Parameters.scala:52:{29,56}]
wire [13:0] uncommonBits_5 = _uncommonBits_T_5; // @[Parameters.scala:52:{29,56}]
wire [13:0] uncommonBits_6 = _uncommonBits_T_6; // @[Parameters.scala:52:{29,56}]
wire [13:0] uncommonBits_7 = _uncommonBits_T_7; // @[Parameters.scala:52:{29,56}]
wire [13:0] uncommonBits_8 = _uncommonBits_T_8; // @[Parameters.scala:52:{29,56}]
wire [13:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_10 = source_ok_uncommonBits_1 < 14'h2010; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_11 = _source_ok_T_10; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_1_0 = _source_ok_T_11; // @[Parameters.scala:1138:31]
wire _T_665 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_665; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_665; // @[Decoupled.scala:51:35]
wire a_first_done = _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
reg a_first_counter; // @[Edges.scala:229:27]
wire _a_first_last_T = a_first_counter; // @[Edges.scala:229:27, :232:25]
wire [1:0] _a_first_counter1_T = {1'h0, a_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire a_first_counter1 = _a_first_counter1_T[0]; // @[Edges.scala:230:28]
wire a_first = ~a_first_counter; // @[Edges.scala:229:27, :231:25]
wire _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire _a_first_counter_T = ~a_first & a_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [1:0] size; // @[Monitor.scala:389:22]
reg [13:0] source; // @[Monitor.scala:390:22]
reg [11:0] address; // @[Monitor.scala:391:22]
wire _T_733 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_733; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_733; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_733; // @[Decoupled.scala:51:35]
wire d_first_done = _d_first_T; // @[Decoupled.scala:51:35]
wire [5:0] _GEN_0 = 6'h7 << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [2:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
reg d_first_counter; // @[Edges.scala:229:27]
wire _d_first_last_T = d_first_counter; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T = {1'h0, d_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1 = _d_first_counter1_T[0]; // @[Edges.scala:230:28]
wire d_first = ~d_first_counter; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T = ~d_first & d_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] size_1; // @[Monitor.scala:540:22]
reg [13:0] source_1; // @[Monitor.scala:541:22]
reg [8207:0] inflight; // @[Monitor.scala:614:27]
reg [32831:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [32831:0] inflight_sizes; // @[Monitor.scala:618:33]
wire a_first_done_1 = _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
reg a_first_counter_1; // @[Edges.scala:229:27]
wire _a_first_last_T_2 = a_first_counter_1; // @[Edges.scala:229:27, :232:25]
wire [1:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire a_first_counter1_1 = _a_first_counter1_T_1[0]; // @[Edges.scala:230:28]
wire a_first_1 = ~a_first_counter_1; // @[Edges.scala:229:27, :231:25]
wire _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire _a_first_counter_T_1 = ~a_first_1 & a_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21]
wire d_first_done_1 = _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
reg d_first_counter_1; // @[Edges.scala:229:27]
wire _d_first_last_T_2 = d_first_counter_1; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1_1 = _d_first_counter1_T_1[0]; // @[Edges.scala:230:28]
wire d_first_1 = ~d_first_counter_1; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T_1 = ~d_first_1 & d_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21]
wire [8207:0] a_set; // @[Monitor.scala:626:34]
wire [8207:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [32831:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [32831:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [16:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [16:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [16:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [16:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [16:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [16:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [16:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [16:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [16:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [32831:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [32831:0] _a_opcode_lookup_T_6 = {32828'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [32831:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[32831:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [32831:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [32831:0] _a_size_lookup_T_6 = {32828'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [32831:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[32831:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [2:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [16383:0] _GEN_2 = 16384'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [16383:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [16383:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[8207:0] : 8208'h0; // @[OneHot.scala:58:35]
wire _T_598 = _T_665 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_598 ? _a_set_T[8207:0] : 8208'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_598 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [2:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [2:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[2:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_598 ? _a_sizes_set_interm_T_1 : 3'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [16:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [16:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [16:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [131074:0] _a_opcodes_set_T_1 = {131071'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_598 ? _a_opcodes_set_T_1[32831:0] : 32832'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [131073:0] _a_sizes_set_T_1 = {131071'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_598 ? _a_sizes_set_T_1[32831:0] : 32832'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [8207:0] d_clr; // @[Monitor.scala:664:34]
wire [8207:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [32831:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [32831:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_644 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [16383:0] _GEN_5 = 16384'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [16383:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [16383:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [16383:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [16383:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_644 & ~d_release_ack ? _d_clr_wo_ready_T[8207:0] : 8208'h0; // @[OneHot.scala:58:35]
wire _T_613 = _T_733 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_613 ? _d_clr_T[8207:0] : 8208'h0; // @[OneHot.scala:58:35]
wire [131086:0] _d_opcodes_clr_T_5 = 131087'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_613 ? _d_opcodes_clr_T_5[32831:0] : 32832'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [131086:0] _d_sizes_clr_T_5 = 131087'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_613 ? _d_sizes_clr_T_5[32831:0] : 32832'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [8207:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [8207:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [8207:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [32831:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [32831:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [32831:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [32831:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [32831:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [32831:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [8207:0] inflight_1; // @[Monitor.scala:726:35]
wire [8207:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [32831:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [32831:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [32831:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [32831:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire d_first_done_2 = _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
reg d_first_counter_2; // @[Edges.scala:229:27]
wire _d_first_last_T_4 = d_first_counter_2; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1_2 = _d_first_counter1_T_2[0]; // @[Edges.scala:230:28]
wire d_first_2 = ~d_first_counter_2; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T_2 = ~d_first_2 & d_first_counter1_2; // @[Edges.scala:230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [32831:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [32831:0] _c_opcode_lookup_T_6 = {32828'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [32831:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[32831:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [32831:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [32831:0] _c_size_lookup_T_6 = {32828'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [32831:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[32831:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [8207:0] d_clr_1; // @[Monitor.scala:774:34]
wire [8207:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [32831:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [32831:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_709 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_709 & d_release_ack_1 ? _d_clr_wo_ready_T_1[8207:0] : 8208'h0; // @[OneHot.scala:58:35]
wire _T_691 = _T_733 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_691 ? _d_clr_T_1[8207:0] : 8208'h0; // @[OneHot.scala:58:35]
wire [131086:0] _d_opcodes_clr_T_11 = 131087'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_691 ? _d_opcodes_clr_T_11[32831:0] : 32832'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [131086:0] _d_sizes_clr_T_11 = 131087'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_691 ? _d_sizes_clr_T_11[32831:0] : 32832'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 14'h0; // @[Monitor.scala:36:7, :795:113]
wire [8207:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [8207:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [32831:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [32831:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [32831:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [32831:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
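// Illustrative usage (hypothetical values, not taken from any design in this file):
//   5.U.addWrap(6.U, 8)  // (5 + 6) % 8 = 3; a power-of-two n reduces to dropping the carry bit
//   5.U.subWrap(7.U, 8)  // (5 - 7) % 8 = 6; a negative result wraps by discarding the borrow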
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
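// Worked example (assumed width of 4, for illustration only):
//   UIntToOH1(2.U, 4) yields b0011, a thermometer code with the low x bits set;
//   OH1ToOH(b0011) recovers the one-hot b0100, so OH1ToUInt(b0011) = 2.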
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
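// Example on a 5-bit value (illustrative only): leftOR(b00100) = b11100, i.e. each set bit
// smears toward the MSB, while rightOR(b00100) = b00111 smears set bits toward the LSB.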
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Breakpoint.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util.{Cat}
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.tile.{CoreBundle, HasCoreParameters}
import freechips.rocketchip.util._
class BPControl(implicit p: Parameters) extends CoreBundle()(p) {
val ttype = UInt(4.W)
val dmode = Bool()
val maskmax = UInt(6.W)
val reserved = UInt((xLen - (if (coreParams.useBPWatch) 26 else 24)).W)
val action = UInt((if (coreParams.useBPWatch) 3 else 1).W)
val chain = Bool()
val zero = UInt(2.W)
val tmatch = UInt(2.W)
val m = Bool()
val h = Bool()
val s = Bool()
val u = Bool()
val x = Bool()
val w = Bool()
val r = Bool()
def tType = 2
def maskMax = 4
def enabled(mstatus: MStatus) = !mstatus.debug && Cat(m, h, s, u)(mstatus.prv)
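// Note (descriptive, behavior unchanged): Cat(m, h, s, u) places u at bit 0 and m at bit 3, so
// indexing by mstatus.prv selects the enable bit for the current privilege level (e.g. prv = 3
// selects m); all triggers are suppressed while in debug mode.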
}
class TExtra(implicit p: Parameters) extends CoreBundle()(p) {
def mvalueBits: Int = if (xLen == 32) coreParams.mcontextWidth min 6 else coreParams.mcontextWidth min 13
def svalueBits: Int = if (xLen == 32) coreParams.scontextWidth min 16 else coreParams.scontextWidth min 34
def mselectPos: Int = if (xLen == 32) 25 else 50
def mvaluePos : Int = mselectPos + 1
def sselectPos: Int = 0
def svaluePos : Int = 2
val mvalue = UInt(mvalueBits.W)
val mselect = Bool()
val pad2 = UInt((mselectPos - svalueBits - 2).W)
val svalue = UInt(svalueBits.W)
val pad1 = UInt(1.W)
val sselect = Bool()
}
class BP(implicit p: Parameters) extends CoreBundle()(p) {
val control = new BPControl
val address = UInt(vaddrBits.W)
val textra = new TExtra
def contextMatch(mcontext: UInt, scontext: UInt) =
(if (coreParams.mcontextWidth > 0) (!textra.mselect || (mcontext(textra.mvalueBits-1,0) === textra.mvalue)) else true.B) &&
(if (coreParams.scontextWidth > 0) (!textra.sselect || (scontext(textra.svalueBits-1,0) === textra.svalue)) else true.B)
def mask(dummy: Int = 0) =
(0 until control.maskMax-1).scanLeft(control.tmatch(0))((m, i) => m && address(i)).asUInt
def pow2AddressMatch(x: UInt) =
(~x | mask()) === (~address | mask())
def rangeAddressMatch(x: UInt) =
(x >= address) ^ control.tmatch(0)
def addressMatch(x: UInt) =
Mux(control.tmatch(1), rangeAddressMatch(x), pow2AddressMatch(x))
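// Illustrative example (assumes the default maskMax = 4): with control.tmatch = "b01" (NAPOT
// mode) and a trigger address whose low bits are ...011, mask() = "b0111", so pow2AddressMatch
// ignores the low three bits and the trigger covers an 8-byte naturally aligned region. When
// control.tmatch(1) is set, rangeAddressMatch fires for x >= address, or for x < address if
// tmatch(0) is also set.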
}
class BPWatch (val n: Int) extends Bundle() {
val valid = Vec(n, Bool())
val rvalid = Vec(n, Bool())
val wvalid = Vec(n, Bool())
val ivalid = Vec(n, Bool())
val action = UInt(3.W)
}
class BreakpointUnit(n: Int)(implicit val p: Parameters) extends Module with HasCoreParameters {
val io = IO(new Bundle {
val status = Input(new MStatus())
val bp = Input(Vec(n, new BP))
val pc = Input(UInt(vaddrBits.W))
val ea = Input(UInt(vaddrBits.W))
val mcontext = Input(UInt(coreParams.mcontextWidth.W))
val scontext = Input(UInt(coreParams.scontextWidth.W))
val xcpt_if = Output(Bool())
val xcpt_ld = Output(Bool())
val xcpt_st = Output(Bool())
val debug_if = Output(Bool())
val debug_ld = Output(Bool())
val debug_st = Output(Bool())
val bpwatch = Output(Vec(n, new BPWatch(1)))
})
io.xcpt_if := false.B
io.xcpt_ld := false.B
io.xcpt_st := false.B
io.debug_if := false.B
io.debug_ld := false.B
io.debug_st := false.B
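// The fold below threads (ri, wi, xi) through the breakpoints to implement trigger chaining: an
// entry with control.chain set only passes its own match on to the next entry, while an unchained
// ("end") entry raises the exception or debug request only when its own match and every preceding
// chained match are true.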
(io.bpwatch zip io.bp).foldLeft((true.B, true.B, true.B)) { case ((ri, wi, xi), (bpw, bp)) =>
val en = bp.control.enabled(io.status)
val cx = bp.contextMatch(io.mcontext, io.scontext)
val r = en && bp.control.r && bp.addressMatch(io.ea) && cx
val w = en && bp.control.w && bp.addressMatch(io.ea) && cx
val x = en && bp.control.x && bp.addressMatch(io.pc) && cx
val end = !bp.control.chain
val action = bp.control.action
bpw.action := action
bpw.valid(0) := false.B
bpw.rvalid(0) := false.B
bpw.wvalid(0) := false.B
bpw.ivalid(0) := false.B
when (end && r && ri) { io.xcpt_ld := (action === 0.U); io.debug_ld := (action === 1.U); bpw.valid(0) := true.B; bpw.rvalid(0) := true.B }
when (end && w && wi) { io.xcpt_st := (action === 0.U); io.debug_st := (action === 1.U); bpw.valid(0) := true.B; bpw.wvalid(0) := true.B }
when (end && x && xi) { io.xcpt_if := (action === 0.U); io.debug_if := (action === 1.U); bpw.valid(0) := true.B; bpw.ivalid(0) := true.B }
(end || r, end || w, end || x)
}
}
| module BreakpointUnit_2( // @[Breakpoint.scala:79:7]
input clock, // @[Breakpoint.scala:79:7]
input reset, // @[Breakpoint.scala:79:7]
input io_status_debug, // @[Breakpoint.scala:80:14]
input io_status_cease, // @[Breakpoint.scala:80:14]
input io_status_wfi, // @[Breakpoint.scala:80:14]
input [31:0] io_status_isa, // @[Breakpoint.scala:80:14]
input [1:0] io_status_dprv, // @[Breakpoint.scala:80:14]
input io_status_dv, // @[Breakpoint.scala:80:14]
input [1:0] io_status_prv, // @[Breakpoint.scala:80:14]
input io_status_v, // @[Breakpoint.scala:80:14]
input io_status_sd, // @[Breakpoint.scala:80:14]
input io_status_mpv, // @[Breakpoint.scala:80:14]
input io_status_gva, // @[Breakpoint.scala:80:14]
input io_status_tsr, // @[Breakpoint.scala:80:14]
input io_status_tw, // @[Breakpoint.scala:80:14]
input io_status_tvm, // @[Breakpoint.scala:80:14]
input io_status_mxr, // @[Breakpoint.scala:80:14]
input io_status_sum, // @[Breakpoint.scala:80:14]
input io_status_mprv, // @[Breakpoint.scala:80:14]
input [1:0] io_status_fs, // @[Breakpoint.scala:80:14]
input [1:0] io_status_mpp, // @[Breakpoint.scala:80:14]
input io_status_spp, // @[Breakpoint.scala:80:14]
input io_status_mpie, // @[Breakpoint.scala:80:14]
input io_status_spie, // @[Breakpoint.scala:80:14]
input io_status_mie, // @[Breakpoint.scala:80:14]
input io_status_sie, // @[Breakpoint.scala:80:14]
input io_bp_0_control_dmode, // @[Breakpoint.scala:80:14]
input io_bp_0_control_action, // @[Breakpoint.scala:80:14]
input [1:0] io_bp_0_control_tmatch, // @[Breakpoint.scala:80:14]
input io_bp_0_control_m, // @[Breakpoint.scala:80:14]
input io_bp_0_control_s, // @[Breakpoint.scala:80:14]
input io_bp_0_control_u, // @[Breakpoint.scala:80:14]
input io_bp_0_control_x, // @[Breakpoint.scala:80:14]
input io_bp_0_control_w, // @[Breakpoint.scala:80:14]
input io_bp_0_control_r, // @[Breakpoint.scala:80:14]
input [38:0] io_bp_0_address, // @[Breakpoint.scala:80:14]
input [47:0] io_bp_0_textra_pad2, // @[Breakpoint.scala:80:14]
input io_bp_0_textra_pad1, // @[Breakpoint.scala:80:14]
input [38:0] io_pc, // @[Breakpoint.scala:80:14]
input [38:0] io_ea, // @[Breakpoint.scala:80:14]
output io_xcpt_if, // @[Breakpoint.scala:80:14]
output io_xcpt_ld, // @[Breakpoint.scala:80:14]
output io_xcpt_st, // @[Breakpoint.scala:80:14]
output io_debug_if, // @[Breakpoint.scala:80:14]
output io_debug_ld, // @[Breakpoint.scala:80:14]
output io_debug_st, // @[Breakpoint.scala:80:14]
output io_bpwatch_0_rvalid_0, // @[Breakpoint.scala:80:14]
output io_bpwatch_0_wvalid_0, // @[Breakpoint.scala:80:14]
output io_bpwatch_0_ivalid_0 // @[Breakpoint.scala:80:14]
);
wire io_status_debug_0 = io_status_debug; // @[Breakpoint.scala:79:7]
wire io_status_cease_0 = io_status_cease; // @[Breakpoint.scala:79:7]
wire io_status_wfi_0 = io_status_wfi; // @[Breakpoint.scala:79:7]
wire [31:0] io_status_isa_0 = io_status_isa; // @[Breakpoint.scala:79:7]
wire [1:0] io_status_dprv_0 = io_status_dprv; // @[Breakpoint.scala:79:7]
wire io_status_dv_0 = io_status_dv; // @[Breakpoint.scala:79:7]
wire [1:0] io_status_prv_0 = io_status_prv; // @[Breakpoint.scala:79:7]
wire io_status_v_0 = io_status_v; // @[Breakpoint.scala:79:7]
wire io_status_sd_0 = io_status_sd; // @[Breakpoint.scala:79:7]
wire io_status_mpv_0 = io_status_mpv; // @[Breakpoint.scala:79:7]
wire io_status_gva_0 = io_status_gva; // @[Breakpoint.scala:79:7]
wire io_status_tsr_0 = io_status_tsr; // @[Breakpoint.scala:79:7]
wire io_status_tw_0 = io_status_tw; // @[Breakpoint.scala:79:7]
wire io_status_tvm_0 = io_status_tvm; // @[Breakpoint.scala:79:7]
wire io_status_mxr_0 = io_status_mxr; // @[Breakpoint.scala:79:7]
wire io_status_sum_0 = io_status_sum; // @[Breakpoint.scala:79:7]
wire io_status_mprv_0 = io_status_mprv; // @[Breakpoint.scala:79:7]
wire [1:0] io_status_fs_0 = io_status_fs; // @[Breakpoint.scala:79:7]
wire [1:0] io_status_mpp_0 = io_status_mpp; // @[Breakpoint.scala:79:7]
wire io_status_spp_0 = io_status_spp; // @[Breakpoint.scala:79:7]
wire io_status_mpie_0 = io_status_mpie; // @[Breakpoint.scala:79:7]
wire io_status_spie_0 = io_status_spie; // @[Breakpoint.scala:79:7]
wire io_status_mie_0 = io_status_mie; // @[Breakpoint.scala:79:7]
wire io_status_sie_0 = io_status_sie; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_dmode_0 = io_bp_0_control_dmode; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_action_0 = io_bp_0_control_action; // @[Breakpoint.scala:79:7]
wire [1:0] io_bp_0_control_tmatch_0 = io_bp_0_control_tmatch; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_m_0 = io_bp_0_control_m; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_s_0 = io_bp_0_control_s; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_u_0 = io_bp_0_control_u; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_x_0 = io_bp_0_control_x; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_w_0 = io_bp_0_control_w; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_r_0 = io_bp_0_control_r; // @[Breakpoint.scala:79:7]
wire [38:0] io_bp_0_address_0 = io_bp_0_address; // @[Breakpoint.scala:79:7]
wire [47:0] io_bp_0_textra_pad2_0 = io_bp_0_textra_pad2; // @[Breakpoint.scala:79:7]
wire io_bp_0_textra_pad1_0 = io_bp_0_textra_pad1; // @[Breakpoint.scala:79:7]
wire [38:0] io_pc_0 = io_pc; // @[Breakpoint.scala:79:7]
wire [38:0] io_ea_0 = io_ea; // @[Breakpoint.scala:79:7]
wire [1:0] io_status_sxl = 2'h2; // @[Breakpoint.scala:79:7, :80:14]
wire [1:0] io_status_uxl = 2'h2; // @[Breakpoint.scala:79:7, :80:14]
wire cx = 1'h1; // @[Breakpoint.scala:55:126]
wire end_0 = 1'h1; // @[Breakpoint.scala:109:15]
wire [39:0] io_bp_0_control_reserved = 40'h0; // @[Breakpoint.scala:79:7, :80:14]
wire [5:0] io_bp_0_control_maskmax = 6'h4; // @[Breakpoint.scala:79:7, :80:14]
wire [3:0] io_bp_0_control_ttype = 4'h2; // @[Breakpoint.scala:79:7, :80:14]
wire [1:0] io_status_xs = 2'h0; // @[Breakpoint.scala:79:7, :80:14]
wire [1:0] io_status_vs = 2'h0; // @[Breakpoint.scala:79:7, :80:14]
wire [1:0] io_bp_0_control_zero = 2'h0; // @[Breakpoint.scala:79:7, :80:14]
wire [7:0] io_status_zero1 = 8'h0; // @[Breakpoint.scala:79:7, :80:14]
wire io_status_mbe = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_sbe = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_sd_rv32 = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_ube = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_upie = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_hie = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_uie = 1'h0; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_chain = 1'h0; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_h = 1'h0; // @[Breakpoint.scala:79:7]
wire io_bp_0_textra_mselect = 1'h0; // @[Breakpoint.scala:79:7]
wire io_bp_0_textra_sselect = 1'h0; // @[Breakpoint.scala:79:7]
wire [22:0] io_status_zero2 = 23'h0; // @[Breakpoint.scala:79:7, :80:14]
wire _io_debug_ld_T = io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :118:84]
wire _io_debug_st_T = io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :119:84]
wire _io_debug_if_T = io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :120:84]
wire r; // @[Breakpoint.scala:106:58]
wire w; // @[Breakpoint.scala:107:58]
wire x; // @[Breakpoint.scala:108:58]
wire io_bpwatch_0_valid_0; // @[Breakpoint.scala:79:7]
wire io_bpwatch_0_rvalid_0_0; // @[Breakpoint.scala:79:7]
wire io_bpwatch_0_wvalid_0_0; // @[Breakpoint.scala:79:7]
wire io_bpwatch_0_ivalid_0_0; // @[Breakpoint.scala:79:7]
wire [2:0] io_bpwatch_0_action; // @[Breakpoint.scala:79:7]
wire io_xcpt_if_0; // @[Breakpoint.scala:79:7]
wire io_xcpt_ld_0; // @[Breakpoint.scala:79:7]
wire io_xcpt_st_0; // @[Breakpoint.scala:79:7]
wire io_debug_if_0; // @[Breakpoint.scala:79:7]
wire io_debug_ld_0; // @[Breakpoint.scala:79:7]
wire io_debug_st_0; // @[Breakpoint.scala:79:7]
wire _en_T = ~io_status_debug_0; // @[Breakpoint.scala:30:35, :79:7]
wire [1:0] en_lo = {io_bp_0_control_s_0, io_bp_0_control_u_0}; // @[Breakpoint.scala:30:56, :79:7]
wire [1:0] en_hi = {io_bp_0_control_m_0, 1'h0}; // @[Breakpoint.scala:30:56, :79:7]
wire [3:0] _en_T_1 = {en_hi, en_lo}; // @[Breakpoint.scala:30:56]
wire [3:0] _en_T_2 = _en_T_1 >> io_status_prv_0; // @[Breakpoint.scala:30:{56,68}, :79:7]
wire _en_T_3 = _en_T_2[0]; // @[Breakpoint.scala:30:68]
wire en = _en_T & _en_T_3; // @[Breakpoint.scala:30:{35,50,68}]
wire _r_T = en & io_bp_0_control_r_0; // @[Breakpoint.scala:30:50, :79:7, :106:16]
wire _r_T_1 = io_bp_0_control_tmatch_0[1]; // @[Breakpoint.scala:68:23, :79:7]
wire _w_T_1 = io_bp_0_control_tmatch_0[1]; // @[Breakpoint.scala:68:23, :79:7]
wire _x_T_1 = io_bp_0_control_tmatch_0[1]; // @[Breakpoint.scala:68:23, :79:7]
wire _GEN = io_ea_0 >= io_bp_0_address_0; // @[Breakpoint.scala:65:8, :79:7]
wire _r_T_2; // @[Breakpoint.scala:65:8]
assign _r_T_2 = _GEN; // @[Breakpoint.scala:65:8]
wire _w_T_2; // @[Breakpoint.scala:65:8]
assign _w_T_2 = _GEN; // @[Breakpoint.scala:65:8]
wire _r_T_3 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:65:36, :79:7]
wire _r_T_6 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
wire _r_T_16 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
wire _w_T_3 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:65:36, :79:7]
wire _w_T_6 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
wire _w_T_16 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
wire _x_T_3 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:65:36, :79:7]
wire _x_T_6 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
wire _x_T_16 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
wire _r_T_4 = _r_T_2 ^ _r_T_3; // @[Breakpoint.scala:65:{8,20,36}]
wire [38:0] _r_T_5 = ~io_ea_0; // @[Breakpoint.scala:62:6, :79:7]
wire _r_T_7 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
wire _r_T_17 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
wire _w_T_7 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
wire _w_T_17 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
wire _x_T_7 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
wire _x_T_17 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
wire _r_T_8 = _r_T_6 & _r_T_7; // @[Breakpoint.scala:59:{56,73,83}]
wire _r_T_9 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
wire _r_T_19 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
wire _w_T_9 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
wire _w_T_19 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
wire _x_T_9 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
wire _x_T_19 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
wire _r_T_10 = _r_T_8 & _r_T_9; // @[Breakpoint.scala:59:{73,83}]
wire _r_T_11 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
wire _r_T_21 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
wire _w_T_11 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
wire _w_T_21 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
wire _x_T_11 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
wire _x_T_21 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
wire _r_T_12 = _r_T_10 & _r_T_11; // @[Breakpoint.scala:59:{73,83}]
wire [1:0] r_lo = {_r_T_8, _r_T_6}; // @[package.scala:45:27]
wire [1:0] r_hi = {_r_T_12, _r_T_10}; // @[package.scala:45:27]
wire [3:0] _r_T_13 = {r_hi, r_lo}; // @[package.scala:45:27]
wire [38:0] _r_T_14 = {_r_T_5[38:4], _r_T_5[3:0] | _r_T_13}; // @[package.scala:45:27]
wire [38:0] _r_T_15 = ~io_bp_0_address_0; // @[Breakpoint.scala:62:24, :79:7]
wire _r_T_18 = _r_T_16 & _r_T_17; // @[Breakpoint.scala:59:{56,73,83}]
wire _r_T_20 = _r_T_18 & _r_T_19; // @[Breakpoint.scala:59:{73,83}]
wire _r_T_22 = _r_T_20 & _r_T_21; // @[Breakpoint.scala:59:{73,83}]
wire [1:0] r_lo_1 = {_r_T_18, _r_T_16}; // @[package.scala:45:27]
wire [1:0] r_hi_1 = {_r_T_22, _r_T_20}; // @[package.scala:45:27]
wire [3:0] _r_T_23 = {r_hi_1, r_lo_1}; // @[package.scala:45:27]
wire [38:0] _r_T_24 = {_r_T_15[38:4], _r_T_15[3:0] | _r_T_23}; // @[package.scala:45:27]
wire _r_T_25 = _r_T_14 == _r_T_24; // @[Breakpoint.scala:62:{9,19,33}]
wire _r_T_26 = _r_T_1 ? _r_T_4 : _r_T_25; // @[Breakpoint.scala:62:19, :65:20, :68:{8,23}]
wire _r_T_27 = _r_T & _r_T_26; // @[Breakpoint.scala:68:8, :106:{16,32}]
assign r = _r_T_27; // @[Breakpoint.scala:106:{32,58}]
assign io_bpwatch_0_rvalid_0_0 = r; // @[Breakpoint.scala:79:7, :106:58]
wire _w_T = en & io_bp_0_control_w_0; // @[Breakpoint.scala:30:50, :79:7, :107:16]
wire _w_T_4 = _w_T_2 ^ _w_T_3; // @[Breakpoint.scala:65:{8,20,36}]
wire [38:0] _w_T_5 = ~io_ea_0; // @[Breakpoint.scala:62:6, :79:7]
wire _w_T_8 = _w_T_6 & _w_T_7; // @[Breakpoint.scala:59:{56,73,83}]
wire _w_T_10 = _w_T_8 & _w_T_9; // @[Breakpoint.scala:59:{73,83}]
wire _w_T_12 = _w_T_10 & _w_T_11; // @[Breakpoint.scala:59:{73,83}]
wire [1:0] w_lo = {_w_T_8, _w_T_6}; // @[package.scala:45:27]
wire [1:0] w_hi = {_w_T_12, _w_T_10}; // @[package.scala:45:27]
wire [3:0] _w_T_13 = {w_hi, w_lo}; // @[package.scala:45:27]
wire [38:0] _w_T_14 = {_w_T_5[38:4], _w_T_5[3:0] | _w_T_13}; // @[package.scala:45:27]
wire [38:0] _w_T_15 = ~io_bp_0_address_0; // @[Breakpoint.scala:62:24, :79:7]
wire _w_T_18 = _w_T_16 & _w_T_17; // @[Breakpoint.scala:59:{56,73,83}]
wire _w_T_20 = _w_T_18 & _w_T_19; // @[Breakpoint.scala:59:{73,83}]
wire _w_T_22 = _w_T_20 & _w_T_21; // @[Breakpoint.scala:59:{73,83}]
wire [1:0] w_lo_1 = {_w_T_18, _w_T_16}; // @[package.scala:45:27]
wire [1:0] w_hi_1 = {_w_T_22, _w_T_20}; // @[package.scala:45:27]
wire [3:0] _w_T_23 = {w_hi_1, w_lo_1}; // @[package.scala:45:27]
wire [38:0] _w_T_24 = {_w_T_15[38:4], _w_T_15[3:0] | _w_T_23}; // @[package.scala:45:27]
wire _w_T_25 = _w_T_14 == _w_T_24; // @[Breakpoint.scala:62:{9,19,33}]
wire _w_T_26 = _w_T_1 ? _w_T_4 : _w_T_25; // @[Breakpoint.scala:62:19, :65:20, :68:{8,23}]
wire _w_T_27 = _w_T & _w_T_26; // @[Breakpoint.scala:68:8, :107:{16,32}]
assign w = _w_T_27; // @[Breakpoint.scala:107:{32,58}]
assign io_bpwatch_0_wvalid_0_0 = w; // @[Breakpoint.scala:79:7, :107:58]
wire _x_T = en & io_bp_0_control_x_0; // @[Breakpoint.scala:30:50, :79:7, :108:16]
wire _x_T_2 = io_pc_0 >= io_bp_0_address_0; // @[Breakpoint.scala:65:8, :79:7]
wire _x_T_4 = _x_T_2 ^ _x_T_3; // @[Breakpoint.scala:65:{8,20,36}]
wire [38:0] _x_T_5 = ~io_pc_0; // @[Breakpoint.scala:62:6, :79:7]
wire _x_T_8 = _x_T_6 & _x_T_7; // @[Breakpoint.scala:59:{56,73,83}]
wire _x_T_10 = _x_T_8 & _x_T_9; // @[Breakpoint.scala:59:{73,83}]
wire _x_T_12 = _x_T_10 & _x_T_11; // @[Breakpoint.scala:59:{73,83}]
wire [1:0] x_lo = {_x_T_8, _x_T_6}; // @[package.scala:45:27]
wire [1:0] x_hi = {_x_T_12, _x_T_10}; // @[package.scala:45:27]
wire [3:0] _x_T_13 = {x_hi, x_lo}; // @[package.scala:45:27]
wire [38:0] _x_T_14 = {_x_T_5[38:4], _x_T_5[3:0] | _x_T_13}; // @[package.scala:45:27]
wire [38:0] _x_T_15 = ~io_bp_0_address_0; // @[Breakpoint.scala:62:24, :79:7]
wire _x_T_18 = _x_T_16 & _x_T_17; // @[Breakpoint.scala:59:{56,73,83}]
wire _x_T_20 = _x_T_18 & _x_T_19; // @[Breakpoint.scala:59:{73,83}]
wire _x_T_22 = _x_T_20 & _x_T_21; // @[Breakpoint.scala:59:{73,83}]
wire [1:0] x_lo_1 = {_x_T_18, _x_T_16}; // @[package.scala:45:27]
wire [1:0] x_hi_1 = {_x_T_22, _x_T_20}; // @[package.scala:45:27]
wire [3:0] _x_T_23 = {x_hi_1, x_lo_1}; // @[package.scala:45:27]
wire [38:0] _x_T_24 = {_x_T_15[38:4], _x_T_15[3:0] | _x_T_23}; // @[package.scala:45:27]
wire _x_T_25 = _x_T_14 == _x_T_24; // @[Breakpoint.scala:62:{9,19,33}]
wire _x_T_26 = _x_T_1 ? _x_T_4 : _x_T_25; // @[Breakpoint.scala:62:19, :65:20, :68:{8,23}]
wire _x_T_27 = _x_T & _x_T_26; // @[Breakpoint.scala:68:8, :108:{16,32}]
assign x = _x_T_27; // @[Breakpoint.scala:108:{32,58}]
assign io_bpwatch_0_ivalid_0_0 = x; // @[Breakpoint.scala:79:7, :108:58]
assign io_bpwatch_0_action = {2'h0, io_bp_0_control_action_0}; // @[Breakpoint.scala:79:7, :80:14, :112:16]
wire _io_xcpt_ld_T = ~io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :118:51]
assign io_xcpt_ld_0 = r & _io_xcpt_ld_T; // @[Breakpoint.scala:79:7, :97:14, :106:58, :118:{27,40,51}]
assign io_debug_ld_0 = r & _io_debug_ld_T; // @[Breakpoint.scala:79:7, :100:15, :106:58, :118:{27,73,84}]
wire _io_xcpt_st_T = ~io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :118:51, :119:51]
assign io_xcpt_st_0 = w & _io_xcpt_st_T; // @[Breakpoint.scala:79:7, :98:14, :107:58, :119:{27,40,51}]
assign io_debug_st_0 = w & _io_debug_st_T; // @[Breakpoint.scala:79:7, :101:15, :107:58, :119:{27,73,84}]
wire _io_xcpt_if_T = ~io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :118:51, :120:51]
assign io_xcpt_if_0 = x & _io_xcpt_if_T; // @[Breakpoint.scala:79:7, :96:14, :108:58, :120:{27,40,51}]
assign io_debug_if_0 = x & _io_debug_if_T; // @[Breakpoint.scala:79:7, :99:15, :108:58, :120:{27,73,84}]
assign io_bpwatch_0_valid_0 = x | w | r; // @[Breakpoint.scala:79:7, :106:58, :107:58, :108:58, :118:27, :119:{27,107}, :120:{27,107}]
assign io_xcpt_if = io_xcpt_if_0; // @[Breakpoint.scala:79:7]
assign io_xcpt_ld = io_xcpt_ld_0; // @[Breakpoint.scala:79:7]
assign io_xcpt_st = io_xcpt_st_0; // @[Breakpoint.scala:79:7]
assign io_debug_if = io_debug_if_0; // @[Breakpoint.scala:79:7]
assign io_debug_ld = io_debug_ld_0; // @[Breakpoint.scala:79:7]
assign io_debug_st = io_debug_st_0; // @[Breakpoint.scala:79:7]
assign io_bpwatch_0_rvalid_0 = io_bpwatch_0_rvalid_0_0; // @[Breakpoint.scala:79:7]
assign io_bpwatch_0_wvalid_0 = io_bpwatch_0_wvalid_0_0; // @[Breakpoint.scala:79:7]
assign io_bpwatch_0_ivalid_0 = io_bpwatch_0_ivalid_0_0; // @[Breakpoint.scala:79:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RoundAnyRawFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.Fill
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundAnyRawFNToRecFN(
inExpWidth: Int,
inSigWidth: Int,
outExpWidth: Int,
outSigWidth: Int,
options: Int
)
extends RawModule
{
override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(inExpWidth, inSigWidth))
// (allowed exponent range has limits)
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0)
val effectiveInSigWidth =
if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1
val neverUnderflows =
((options &
(flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact)
) != 0) ||
(inExpWidth < outExpWidth)
val neverOverflows =
((options & flRoundOpt_neverOverflows) != 0) ||
(inExpWidth < outExpWidth)
val outNaNExp = BigInt(7)<<(outExpWidth - 2)
val outInfExp = BigInt(6)<<(outExpWidth - 2)
val outMaxFiniteExp = outInfExp - 1
val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2
val outMinNonzeroExp = outMinNormExp - outSigWidth + 1
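    // Worked example (illustrative, not in the upstream file): with outExpWidth = 8
    // and outSigWidth = 24, these evaluate to outNaNExp = 7<<6 = 448,
    // outInfExp = 6<<6 = 384, outMaxFiniteExp = 383, outMinNormExp = (1<<7) + 2 = 130,
    // and outMinNonzeroExp = 130 - 24 + 1 = 107.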
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_near_even = (io.roundingMode === round_near_even)
val roundingMode_minMag = (io.roundingMode === round_minMag)
val roundingMode_min = (io.roundingMode === round_min)
val roundingMode_max = (io.roundingMode === round_max)
val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
val roundingMode_odd = (io.roundingMode === round_odd)
val roundMagUp =
(roundingMode_min && io.in.sign) || (roundingMode_max && ! io.in.sign)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sAdjustedExp =
if (inExpWidth < outExpWidth)
(io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
)(outExpWidth, 0).zext
else if (inExpWidth == outExpWidth)
io.in.sExp
else
io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
val adjustedSig =
if (inSigWidth <= outSigWidth + 2)
io.in.sig<<(outSigWidth - inSigWidth + 2)
else
(io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ##
io.in.sig(inSigWidth - outSigWidth - 2, 0).orR
)
val doShiftSigDown1 =
if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2)
val common_expOut = Wire(UInt((outExpWidth + 1).W))
val common_fractOut = Wire(UInt((outSigWidth - 1).W))
val common_overflow = Wire(Bool())
val common_totalUnderflow = Wire(Bool())
val common_underflow = Wire(Bool())
val common_inexact = Wire(Bool())
if (
neverOverflows && neverUnderflows
&& (effectiveInSigWidth <= outSigWidth)
) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1
common_fractOut :=
Mux(doShiftSigDown1,
adjustedSig(outSigWidth + 1, 3),
adjustedSig(outSigWidth, 2)
)
common_overflow := false.B
common_totalUnderflow := false.B
common_underflow := false.B
common_inexact := false.B
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundMask =
if (neverUnderflows)
0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W)
else
(lowMask(
sAdjustedExp(outExpWidth, 0),
outMinNormExp - outSigWidth - 1,
outMinNormExp
) | doShiftSigDown1) ##
3.U(2.W)
val shiftedRoundMask = 0.U(1.W) ## roundMask>>1
val roundPosMask = ~shiftedRoundMask & roundMask
val roundPosBit = (adjustedSig & roundPosMask).orR
val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR
val anyRound = roundPosBit || anyRoundExtra
val roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
roundPosBit) ||
(roundMagUp && anyRound)
val roundedSig: Bits =
Mux(roundIncr,
(((adjustedSig | roundMask)>>2) +& 1.U) &
~Mux(roundingMode_near_even && roundPosBit &&
! anyRoundExtra,
roundMask>>1,
0.U((outSigWidth + 2).W)
),
(adjustedSig & ~roundMask)>>2 |
Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U)
)
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext
common_expOut := sRoundedExp(outExpWidth, 0)
common_fractOut :=
Mux(doShiftSigDown1,
roundedSig(outSigWidth - 1, 1),
roundedSig(outSigWidth - 2, 0)
)
common_overflow :=
(if (neverOverflows) false.B else
//*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?:
(sRoundedExp>>(outExpWidth - 1) >= 3.S))
common_totalUnderflow :=
(if (neverUnderflows) false.B else
//*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?:
(sRoundedExp < outMinNonzeroExp.S))
val unboundedRange_roundPosBit =
Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1))
val unboundedRange_anyRound =
(doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR
val unboundedRange_roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
unboundedRange_roundPosBit) ||
(roundMagUp && unboundedRange_anyRound)
val roundCarry =
Mux(doShiftSigDown1,
roundedSig(outSigWidth + 1),
roundedSig(outSigWidth)
)
common_underflow :=
(if (neverUnderflows) false.B else
common_totalUnderflow ||
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
(anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) &&
Mux(doShiftSigDown1, roundMask(3), roundMask(2)) &&
! ((io.detectTininess === tininess_afterRounding) &&
! Mux(doShiftSigDown1,
roundMask(4),
roundMask(3)
) &&
roundCarry && roundPosBit &&
unboundedRange_roundIncr)))
common_inexact := common_totalUnderflow || anyRound
}
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val isNaNOut = io.invalidExc || io.in.isNaN
val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf
val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero
val overflow = commonCase && common_overflow
val underflow = commonCase && common_underflow
val inexact = overflow || (commonCase && common_inexact)
val overflow_roundMagUp =
roundingMode_near_even || roundingMode_near_maxMag || roundMagUp
val pegMinNonzeroMagOut =
commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd)
val pegMaxFiniteMagOut = overflow && ! overflow_roundMagUp
val notNaN_isInfOut =
notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp)
val signOut = Mux(isNaNOut, false.B, io.in.sign)
val expOut =
(common_expOut &
~Mux(io.in.isZero || common_totalUnderflow,
(BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMinNonzeroMagOut,
~outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMaxFiniteMagOut,
(BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W),
0.U
) &
~Mux(notNaN_isInfOut,
(BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
)) |
Mux(pegMinNonzeroMagOut,
outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) |
Mux(pegMaxFiniteMagOut,
outMaxFiniteExp.U((outExpWidth + 1).W),
0.U
) |
Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) |
Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U)
val fractOut =
Mux(isNaNOut || io.in.isZero || common_totalUnderflow,
Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U),
common_fractOut
) |
Fill(outSigWidth - 1, pegMaxFiniteMagOut)
io.out := signOut ## expOut ## fractOut
io.exceptionFlags :=
io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int)
extends RawModule
{
override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(expWidth, sigWidth + 2))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
expWidth, sigWidth + 2, expWidth, sigWidth, options))
roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc
roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc
roundAnyRawFNToRecFN.io.in := io.in
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
| module RoundRawFNToRecFN_e8_s24_3( // @[RoundAnyRawFNToRecFN.scala:295:5]
input io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_infiniteExc, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:299:16]
input [9:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:299:16]
input [26:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:299:16]
input [2:0] io_roundingMode, // @[RoundAnyRawFNToRecFN.scala:299:16]
output [32:0] io_out, // @[RoundAnyRawFNToRecFN.scala:299:16]
output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:299:16]
);
wire io_invalidExc_0 = io_invalidExc; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_infiniteExc_0 = io_infiniteExc; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_isNaN_0 = io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_isInf_0 = io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire [9:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire [26:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire [2:0] io_roundingMode_0 = io_roundingMode; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15]
wire [32:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
RoundAnyRawFNToRecFN_ie8_is26_oe8_os24_3 roundAnyRawFNToRecFN ( // @[RoundAnyRawFNToRecFN.scala:310:15]
.io_invalidExc (io_invalidExc_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_infiniteExc (io_infiniteExc_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_isNaN (io_in_isNaN_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_isInf (io_in_isInf_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_isZero (io_in_isZero_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_sign (io_in_sign_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_sExp (io_in_sExp_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_sig (io_in_sig_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_roundingMode (io_roundingMode_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_out (io_out_0),
.io_exceptionFlags (io_exceptionFlags_0)
); // @[RoundAnyRawFNToRecFN.scala:310:15]
assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
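// Illustrative sketch, not part of the upstream file: a hypothetical module showing
// one way AsyncResetShiftReg might be used to pipeline a signal through two
// asynchronously reset stages. The class and signal names below are invented.
class AsyncResetShiftRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  // Two AsyncResetRegVec stages, reset value 0, with a suggested instance name.
  io.q := AsyncResetShiftReg(io.d, depth = 2, init = 0, name = Some("example_pipe"))
}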
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
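// Illustrative sketch, not part of the upstream file: a hypothetical module that
// brings an asynchronous Bool into the local clock domain through a 3-deep
// synchronizer chain, as described in the header comment. Names are invented.
class AsyncInputSyncExample extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())
    val sync_out = Output(Bool())
  })
  // Three flops, reset value 0; the primitives inside use an AsyncReset.
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, sync = 3, init = 0, name = Some("example_sync"))
}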
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_173( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
output io_q // @[ShiftReg.scala:36:14]
);
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire io_d = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41]
wire _output_T_1 = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_309 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
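// Illustrative sketch, not part of the upstream file: one plausible parameter
// choice, a 4-entry queue with a 2-flop synchronizer on each gray-coded pointer.
object AsyncQueueParamsExample {
  val params = AsyncQueueParams(depth = 4, sync = 2, safe = true, narrow = false)
  // With these values, bits = log2Ceil(4) = 2 and wires = depth = 4 (narrow is
  // false), so the full 4-entry memory is exposed across the crossing.
}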
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
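// Worked example (illustrative, not in the upstream file): with bits = 3, a binary
// count of 5 (3'b101) maps to Gray code 101 ^ 010 = 111, and 6 (3'b110) maps to
// 110 ^ 011 = 101, so consecutive counts differ in exactly one bit. That property
// is what makes the widx/ridx pointers below safe to synchronize across domains.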
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
    // Impossible to write because a dequeue can occur on the receiving side,
    // then reset is allowed to happen, but the write side cannot know that the
    // dequeue occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
  // The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_14( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_21 io_out_source_extend ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in_0), // @[AsyncQueue.scala:58:7]
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |